Fix demos for CPU inference (#104)

This commit is contained in:
will ye
2025-05-26 12:24:30 -04:00
committed by GitHub
parent 75aaf0c3ae
commit 2111d9c52c
2 changed files with 4 additions and 3 deletions

View File

@@ -44,7 +44,7 @@ OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
# use bfloat16
torch.autocast(device_type=DEVICE, dtype=torch.bfloat16).__enter__()
-if torch.cuda.get_device_properties(0).major >= 8:
+if torch.cuda.is_available() and torch.cuda.get_device_properties(0).major >= 8:
# turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True