@@ -276,8 +276,7 @@ def test_typeconvert_upcast(src_dtype, dst_dtype, device):
     # On HIP, fp8e4nv upcasting to fp32 is only supported on CDNA4, and
     # fp8e4nv upcasting to bf16 and fp16 is only supported on CDNA3 and CDNA4.
     if is_cuda():
-        if ((src_dtype == 'float8e4nv' and torch.cuda.get_device_capability(0) < (8, 9))
-                or src_dtype in ('float8e4b8', 'float8e5b16')):
+        if src_dtype in ('float8e4b8', 'float8e5b16'):
             # If the dtype should error out in the given device, we assert that and return
             with pytest.raises(triton.CompilationError, match="not supported in this architecture"):
                 launch_exhaustive_populate(getattr(tl, src_dtype), 0, 65536, False, 8, 0x7f, device=device)
@@ -333,12 +332,6 @@ def test_typeconvert_upcast(src_dtype, dst_dtype, device):
 def test_typeconvert_downcast(src_dtype, dst_dtype, rounding, max_repr, device):
 
     if is_cuda():
-        if src_dtype != 'float32' and torch.cuda.get_device_capability(0) < (9, 0):
-            pytest.skip("non-float32 downcast tests only supported on NVGPU with compute capability 9.0+")
-
-        if dst_dtype in ('float8e5', 'float8e4nv') and rounding == 'rtne' and torch.cuda.get_device_capability(0) < (9, 0):
-            pytest.skip(f"{dst_dtype} downcast with RTNE rounding tests only supported on NVGPU with compute capability 9.0+")
-
         if dst_dtype in ('float8e5b16', 'float8e4b8') and rounding == 'rtne':
             pytest.skip(f"{dst_dtype} downcast with RTNE rounding tests only supported on AMDGPU CDNA3")
 
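
For reference, a minimal sketch of the compute-capability gating pattern these hunks remove, assuming PyTorch's torch.cuda.get_device_capability() as used in the deleted lines; require_capability is a hypothetical helper written for illustration, not part of the actual test suite:

import pytest
import torch

def require_capability(major, minor, reason):
    # Hypothetical helper: skip the calling test when the active CUDA
    # device reports a compute capability older than (major, minor),
    # e.g. (8, 9) for fp8e4nv upcast support or (9, 0) for the
    # downcast paths gated above. Tuples compare lexicographically,
    # so (8, 6) < (8, 9) < (9, 0) behaves as expected.
    if torch.cuda.get_device_capability(0) < (major, minor):
        pytest.skip(reason)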