Support PyTorch acceleration on M1 Mac hardware #14

Open · wants to merge 2 commits into main
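This PR replaces the hard-coded 'cuda'/'cpu' device selection in five scripts with a three-way check that also tries PyTorch's MPS backend on M1 Macs. The same seven-line block recurs in every file, so it could eventually be factored into a shared helper along the lines of the sketch below (the name get_device and its location are hypothetical, not part of this PR):

import torch

def get_device() -> torch.device:
    """Prefer CUDA, then MPS (Apple silicon), then the CPU."""
    if torch.cuda.is_available():
        return torch.device('cuda:0')
    if torch.backends.mps.is_available():  # PyTorch >= 1.12
        return torch.device('mps')
    return torch.device('cpu')

Each call site would then reduce to model.to(get_device()).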
4 changes: 2 additions & 2 deletions empanada/models/panoptic_bifpn.py
@@ -15,8 +15,8 @@
)

__all__ = [
-    'PanopticBiFPN',
-    'PanopticBiFPNPR'
+    'PanopticBiFPN',   # simpler model; uses PanopticDeepLabEngine and doesn't upsample its output automatically
+    'PanopticBiFPNPR'  # used by the empanada-napari plugin; uses PanopticDeepLabRenderEngine
]

class _BaseModel(nn.Module):
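As the new comments note, the two exported classes differ mainly in their inference engines. A minimal sketch of choosing between them, assuming empanada.models re-exports both names (the render flag is an assumption; this diff only shows the __all__ list):

from empanada.models import PanopticBiFPN, PanopticBiFPNPR

def pick_model_cls(render: bool):
    # PanopticBiFPNPR (used by empanada-napari) renders/upsamples its output;
    # PanopticBiFPN leaves upsampling to the caller
    return PanopticBiFPNPR if render else PanopticBiFPN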
9 changes: 8 additions & 1 deletion projects/mitonet/scripts/evaluate3d.py
@@ -71,7 +71,14 @@ def parse_args():
del state_dict[k]

msg = model.load_state_dict(state['state_dict'], strict=True)
-model.to('cuda' if torch.cuda.is_available() else 'cpu') # move model to GPU 0
+# check whether a CUDA GPU or M1 Mac (MPS) device is available
+if torch.cuda.is_available():
+    device = torch.device('cuda:0')
+elif torch.backends.mps.is_available():
+    device = torch.device('mps')
+else:
+    device = torch.device('cpu')
+model.to(device)

# set the evaluation transforms
norms = state['norms']
9 changes: 8 additions & 1 deletion projects/mitonet/scripts/evaluate3d_bc.py
@@ -78,7 +78,14 @@ def parse_args():
del state_dict[k]

msg = model.load_state_dict(state['state_dict'], strict=True)
-model.to('cuda' if torch.cuda.device_count() > 0 else 'cpu') # move model to GPU 0
+# check whether a CUDA GPU or M1 Mac (MPS) device is available
+if torch.cuda.is_available():
+    device = torch.device('cuda:0')
+elif torch.backends.mps.is_available():
+    device = torch.device('mps')
+else:
+    device = torch.device('cpu')
+model.to(device)

# set the evaluation transforms
norms = state['norms']
13 changes: 10 additions & 3 deletions projects/mitonet/scripts/legacy_data/filter_nn.py
@@ -58,7 +58,14 @@
# load the weights from online
state_dict = torch.hub.load_state_dict_from_url(DEFAULT_WEIGHTS, map_location='cpu')
msg = model.load_state_dict(state_dict)
-model = model.to('cuda:0' if torch.cuda.is_available() else 'cpu')
+# check whether a CUDA GPU or M1 Mac (MPS) device is available
+if torch.cuda.is_available():
+    device = torch.device('cuda:0')
+elif torch.backends.mps.is_available():
+    device = torch.device('mps')
+else:
+    device = torch.device('cpu')
+model = model.to(device)
model = model.eval()
cudnn.benchmark = True

@@ -97,8 +104,8 @@ def __getitem__(self, idx):
tst_predictions = []
for data in tqdm(test, total=len(test)):
    with torch.no_grad():
-        # load data onto gpu then forward pass
-        images = data['image'].to('cuda:0' if torch.cuda.is_available() else 'cpu', non_blocking=True)
+        # reuse the device selected above, then do the forward pass
+        images = data['image'].to(device, non_blocking=True)
        output = model(images)
        predictions = nn.Sigmoid()(output)

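One nearby detail: cudnn.benchmark = True (unchanged context above) only affects the CUDA backend and is silently ignored on MPS and the CPU, so it is harmless here. Guarding it merely makes the intent explicit; a sketch:

import torch
import torch.backends.cudnn as cudnn

if torch.cuda.is_available():
    # let cuDNN benchmark convolution algorithms for fixed-size inputs
    cudnn.benchmark = True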
17 changes: 14 additions & 3 deletions scripts/finetune.py
@@ -85,9 +85,20 @@ def main():
main_worker(config)

def main_worker(config):
-    config['device'] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
-    if str(config['device']) == 'cpu':
+    # check whether a CUDA GPU or M1 Mac (MPS) device is available
+    if torch.cuda.is_available():
+        device = torch.device('cuda:0')
+    elif torch.backends.mps.is_available():
+        device = torch.device('mps')
+    else:
+        device = torch.device('cpu')
+    config['device'] = device
+
+    if str(config['device']) == 'cuda:0':
+        print("Using GPU for training.")
+    elif str(config['device']) == 'mps':
+        print("Using M1 Mac hardware (MPS) for training.")
+    elif str(config['device']) == 'cpu':
        print(f"Using CPU for training.")

# setup the model and pick dataset class
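Training on MPS can still hit operators the backend doesn't implement. PyTorch 1.13+ can fall back to the CPU per-op when PYTORCH_ENABLE_MPS_FALLBACK is set before torch is first imported; a sketch, in case finetuning trips on a missing kernel:

import os
os.environ.setdefault('PYTORCH_ENABLE_MPS_FALLBACK', '1')  # must precede `import torch`

import torch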
10 changes: 8 additions & 2 deletions scripts/pdl_inference3d.py
@@ -61,8 +61,14 @@ def parse_args():
# read the model config file
config = load_config(args.config)

-# set device and determine model to load
-device = torch.device("cuda:0" if torch.cuda.is_available() and not args.use_cpu else "cpu")
+# pick CUDA, MPS (M1 Mac), or the CPU; --use_cpu still forces the CPU
+if torch.cuda.is_available() and not args.use_cpu:
+    device = torch.device('cuda:0')
+elif torch.backends.mps.is_available() and not args.use_cpu:
+    device = torch.device('mps')
+else:
+    device = torch.device('cpu')
+# determine model to load
use_quantized = str(device) == 'cpu' and config.get('model_quantized') is not None
model_key = 'model_quantized' if use_quantized else 'model'

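One caveat that applies to every hunk above: torch.backends.mps only exists in PyTorch 1.12 and later, so on older installs the new elif branches raise AttributeError instead of falling through to the CPU. If older PyTorch must stay supported, each check could be written defensively, e.g.:

import torch

# tolerate PyTorch < 1.12, where torch.backends has no `mps` attribute
mps_ok = getattr(torch.backends, 'mps', None) is not None and torch.backends.mps.is_available()
device = torch.device('mps') if mps_ok else torch.device('cpu')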