原理
.to(device) 可以指定CPU 或者GPU
# Pick the first CUDA device when one is available; otherwise run on CPU.
cuda_ok = torch.cuda.is_available()
device = torch.device("cuda:0" if cuda_ok else "cpu")

# Move the model's parameters and buffers onto the chosen device.
model.to(device)

# With more than one visible GPU, replicate the model across devices 0-2.
# DataParallel splits each input batch along dim 0 across the replicas.
if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model, device_ids=[0, 1, 2])

# Ensure the (possibly wrapped) model lives on the target device.
model.to(device)
.cuda() 只能指定GPU
# Restrict this process to GPU 1, then move the model there with .cuda().
# FIX: the original used the key 'CUDA_VISIBLE_DEVICE' (missing trailing 'S');
# CUDA only reads CUDA_VISIBLE_DEVICES, so the misspelled variable was
# silently ignored and the process could still see every GPU.
# NOTE: this must be set before CUDA is first initialized to take effect.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
model.cuda()
# Expose GPUs 0-3 to this process, then parallelize the model across them.
# FIX 1: the original called os.environment — the os module has no such
#        attribute (AttributeError); the environment mapping is os.environ.
# FIX 2: the original spelled torch.nn.Dataparallel (lowercase 'p') twice —
#        the class is torch.nn.DataParallel (AttributeError otherwise).
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'

device_ids = [0, 1, 2, 3]
# Explicit form: name the device ids to replicate across.
net = torch.nn.DataParallel(net, device_ids=device_ids)
# Equivalent shorthand: with device_ids omitted, all visible GPUs are used.
# (The article shows both calls; wrapping twice nests one DataParallel
# inside another — in real code you would use only one of these lines.)
net = torch.nn.DataParallel(net)
# Move parameters to the default CUDA device.
net = net.cuda()
class DataParallel(Module):
    # Excerpt of torch.nn.DataParallel's constructor, quoted by the article
    # to show how device_ids / output_device default.
    # NOTE(review): this excerpt appears truncated — the real __init__
    # continues after the `output_device` default (storing module, device_ids,
    # dim, etc. on self); confirm against the PyTorch source before reuse.
    def __init__(self, module, device_ids=None, output_device=None, dim=0):
        """Wrap *module* for single-machine multi-GPU data parallelism.

        Args:
            module: the model to replicate across GPUs.
            device_ids: GPUs to replicate onto; defaults to every visible GPU.
            output_device: device that gathers the outputs; defaults to the
                first entry of device_ids.
            dim: batch dimension along which inputs are scattered (default 0).
        """
        super(DataParallel, self).__init__()
        # CPU-only fallback: with no CUDA available, keep the bare module
        # and an empty device list so the wrapper degrades to a no-op.
        if not torch.cuda.is_available():
            self.module = module
            self.device_ids = []
            return
        # Default to all visible GPUs when the caller names none.
        if device_ids is None:
            device_ids = list(range(torch.cuda.device_count()))
        # Outputs are gathered on the first device by default.
        if output_device is None:
            output_device = device_ids[0]
转载请注明原文地址: https://lol.8miu.com/read-34108.html