Skip to content

Commit d8c417d

Browse files
henrylhtsang authored and facebook-github-bot committed
rename is_b200 to is_blackwell
Summary: NA Reviewed By: sryap Differential Revision: D87106508
1 parent f686d01 commit d8c417d

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

tritonbench/operators/blackwell_attentions/operator.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -120,7 +120,7 @@
120120
torch.cuda.is_available() and torch.version.cuda and torch.version.cuda >= "12.4"
121121
)
122122

123-
IS_B200 = is_cuda() and "B200" in get_nvidia_gpu_model()
123+
IS_BLACKWELL = is_cuda() and ("B200" in get_nvidia_gpu_model() or "B300" in get_nvidia_gpu_model())
124124

125125

126126
def parse_op_args(args: List[str]):
@@ -385,7 +385,7 @@ def xformers_splitk(
385385
)
386386

387387
@register_benchmark(
388-
enabled=IS_B200 and _is_sdpa_cudnn_attention_available(),
388+
enabled=IS_BLACKWELL and _is_sdpa_cudnn_attention_available(),
389389
label=f"cudnn-sdpa-{torch.backends.cudnn.version()}",
390390
)
391391
def cudnn_sdpa(self, q, k, v):
@@ -398,7 +398,7 @@ def cudnn_sdpa(self, q, k, v):
398398
)
399399

400400
@register_benchmark(
401-
enabled=(IS_B200 and HAS_FLASH_CUTE), label="FAv4", fwd_only=True
401+
enabled=(IS_BLACKWELL and HAS_FLASH_CUTE), label="FAv4", fwd_only=True
402402
)
403403
def cutedsl_blackwell(
404404
self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor

0 commit comments

Comments
 (0)