diff --git a/.github/workflows/nix_build.yaml b/.github/workflows/nix_build.yaml
index e0076af6e61..b8a10f65d78 100644
--- a/.github/workflows/nix_build.yaml
+++ b/.github/workflows/nix_build.yaml
@@ -23,7 +23,7 @@ jobs:
         with:
           name: huggingface
           # If you chose signing key for write access
-          authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
+          # authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
         env:
           USER: github_runner
       - name: Build
diff --git a/.github/workflows/nix_cache.yaml b/.github/workflows/nix_cache.yaml
index 7c73e5842cb..9a76e7c18e9 100644
--- a/.github/workflows/nix_cache.yaml
+++ b/.github/workflows/nix_cache.yaml
@@ -22,7 +22,7 @@ jobs:
         with:
           name: huggingface
           # If you chose signing key for write access
-          authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
+          #authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
         env:
           USER: github_runner
       - name: Build impure devshell
diff --git a/.github/workflows/nix_tests.yaml b/.github/workflows/nix_tests.yaml
index 4f68ff60b4e..72d75f53bee 100644
--- a/.github/workflows/nix_tests.yaml
+++ b/.github/workflows/nix_tests.yaml
@@ -27,9 +27,11 @@ jobs:
         with:
           name: huggingface
           # If you chose signing key for write access
-          authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
+          #authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
         env:
           USER: github_runner
+      - name: Nix info
+        run: nix-shell -p nix-info --run "nix-info -m"
       - name: Build
         run: nix develop .#test --command echo "Ok"
       - name: Pre-commit tests.
diff --git a/docs/source/reference/launcher.md b/docs/source/reference/launcher.md
index 51bd461fcdd..5b7321b73a3 100644
--- a/docs/source/reference/launcher.md
+++ b/docs/source/reference/launcher.md
@@ -58,8 +58,6 @@ Options:
           Quantization method to use for the model. It is not necessary to specify this option for pre-quantized models, since the quantization method is read from the model configuration.
 
           Marlin kernels will be used automatically for GPTQ/AWQ models.
-
-          [env: QUANTIZE=]
 
           Possible values:
           - awq: 4 bit quantization. Requires a specific AWQ quantized model: . Should replace GPTQ models wherever possible because of the better latency
@@ -72,6 +70,8 @@ Options:
           - bitsandbytes-nf4: Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x, but it is known that the model will be much slower to run than the native f16
           - bitsandbytes-fp4: Bitsandbytes 4bit. nf4 should be preferred in most cases but maybe this one has better perplexity performance for you model
           - fp8: [FP8](https://developer.nvidia.com/blog/nvidia-arm-and-intel-publish-fp8-specification-for-standardization-as-an-interchange-format-for-ai/) (e4m3) works on H100 and above This dtype has native ops should be the fastest if available. This is currently not the fastest because of local unpacking + padding to satisfy matrix multiplication limitations
+
+          [env: QUANTIZE=]
 ```
 
 ## SPECULATE
@@ -456,14 +456,14 @@ Options:
 ```shell
       --usage-stats
           Control if anonymous usage stats are collected. Options are "on", "off" and "no-stack" Defaul is on
-
-          [env: USAGE_STATS=]
-          [default: on]
 
           Possible values:
           - on: Default option, usage statistics are collected anonymously
           - off: Disables all collection of usage statistics
           - no-stack: Doesn't send the error stack trace or error type, but allows sending a crash event
+
+          [env: USAGE_STATS=]
+          [default: on]
 ```
 
 ## PAYLOAD_LIMIT