We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 0ca8c35 commit 137705f — Copy full SHA for 137705f
src/compressed_tensors/compressors/quantized_compressors/int4_quantized.py
@@ -126,11 +126,6 @@ def compress_weight(
126
:param device: optional device to move compressed output to
127
:return: dictionary of compressed weight data
128
"""
129
- if global_scale is not None:
130
- raise ValueError(
131
- "global_scale is not supported for the PackQuantizationCompressor"
132
- )
133
-
134
compressed_dict = {}
135
if can_quantize(weight, quantization_args):
136
quantized_weight = quantize(
0 commit comments