# PowerShell: force a source (CMake) build of llama-cpp-python
$env:FORCE_CMAKE = '1'
# CUDA on, AVX/AVX2/FMA off; the toolset argument points the Visual Studio generator at the CUDA 12.6 toolkit
$env:CMAKE_ARGS = '-DGGML_CUDA=on -DGGML_AVX=off -DGGML_AVX2=off -DGGML_FMA=off -DCMAKE_GENERATOR_TOOLSET="cuda=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.6"'
pip install llama-cpp-python --no-cache-dir --force-reinstall --verbose
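# Optional sanity check (a sketch; the .gguf path below is a placeholder for a real local model file):
# load a model with all layers offloaded (n_gpu_layers=-1) and look for CUDA device/offload lines in the verbose load output.
python -c "from llama_cpp import Llama; Llama(model_path='C:\models\model.gguf', n_gpu_layers=-1)"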