Skip to content

Commit

Permalink
[GPTQ, CompressedTensors] Fix unsafe imports and metadata check (#3…
Browse files Browse the repository at this point in the history
…4815)

* fix gptq creation when optimum is not installed + fix metadata checking

* fix compressed tensors as well

* style

* pray for ci luck on flaky tests :prayge:

* trigger ci

---------

Co-authored-by: Marc Sun <[email protected]>
Co-authored-by: Mohamed Mekkouri <[email protected]>
  • Loading branch information
3 people authored Dec 24, 2024
1 parent 6e0515e commit 24c91f0
Show file tree
Hide file tree
Showing 2 changed files with 18 additions and 4 deletions.
7 changes: 7 additions & 0 deletions src/transformers/quantizers/quantizer_compressed_tensors.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,13 @@ class CompressedTensorsHfQuantizer(HfQuantizer):

def __init__(self, quantization_config: CompressedTensorsConfig, **kwargs):
    """Initialize the quantizer and build its ``ModelCompressor``.

    Args:
        quantization_config: the compressed-tensors quantization configuration.
        **kwargs: forwarded to the ``HfQuantizer`` base initializer.

    Raises:
        ImportError: if the ``compressed-tensors`` package is not installed.
    """
    super().__init__(quantization_config, **kwargs)

    # Check availability before touching the third-party package so a
    # missing dependency fails with an actionable message instead of a
    # raw ModuleNotFoundError at import time.
    if not is_compressed_tensors_available():
        raise ImportError(
            "Using `compressed_tensors` quantized models requires the compressed-tensors library: "
            "`pip install compressed-tensors`"
        )

    # Deferred import: only reached once availability is confirmed.
    from compressed_tensors.compressors import ModelCompressor

    self.compressor = ModelCompressor.from_compression_config(quantization_config)
Expand Down
15 changes: 11 additions & 4 deletions src/transformers/quantizers/quantizer_gptq.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,18 +44,25 @@ class GptqHfQuantizer(HfQuantizer):

def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
    """Initialize the quantizer and build the optimum-backed GPTQ helper.

    Args:
        quantization_config: the GPTQ quantization configuration.
        **kwargs: forwarded to the ``HfQuantizer`` base initializer.

    Raises:
        ImportError: if the ``optimum`` package is not installed.
    """
    super().__init__(quantization_config, **kwargs)

    # Fail early with a clear message rather than letting the import
    # below surface a raw ModuleNotFoundError.
    if not is_optimum_available():
        raise ImportError("Loading a GPTQ quantized model requires optimum (`pip install optimum`)")

    # Deferred import: only safe once optimum is known to be installed.
    from optimum.gptq import GPTQQuantizer

    self.optimum_quantizer = GPTQQuantizer.from_dict(self.quantization_config.to_dict_optimum())

def validate_environment(self, *args, **kwargs):
if not is_optimum_available():
raise ImportError("Loading a GPTQ quantized model requires optimum (`pip install optimum`)")

if not is_auto_gptq_available():
raise ImportError(
"Loading a GPTQ quantized model requires the auto-gptq library (`pip install auto-gptq`)"
)

gptq_supports_cpu = version.parse(importlib.metadata.version("auto-gptq")) > version.parse("0.4.2")
if not gptq_supports_cpu and not torch.cuda.is_available():
raise RuntimeError("GPU is required to quantize or run quantize model.")
elif not (is_optimum_available() and is_auto_gptq_available()):
raise ImportError(
"Loading a GPTQ quantized model requires optimum (`pip install optimum`) and auto-gptq library (`pip install auto-gptq`)"
)
elif version.parse(importlib.metadata.version("auto_gptq")) < version.parse("0.4.2"):
raise ImportError(
"You need a version of auto_gptq >= 0.4.2 to use GPTQ: `pip install --upgrade auto-gptq`"
Expand Down

0 comments on commit 24c91f0

Please sign in to comment.