ONNX 模型库
返回模型

说明文档

可在移动设备或迷你电脑上运行,比普通版本快得多。 使用该模型的示例代码:

# Example: interactive text generation with the quantized ONNX GPT-2 model.
# (Original had JSON-escaped quotes, a duplicate AutoTokenizer import, and
# several unused imports; all cleaned up here.)
import time

from transformers import AutoTokenizer, TextStreamer, pipeline
from transformers import logging
from optimum.onnxruntime import ORTModelForCausalLM

logging.set_verbosity_error()  # silence transformers warning/progress noise

model_id = "brianwoo/GPT2-Onnx-Quantized"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = ORTModelForCausalLM.from_pretrained(model_id)

# The streamer prints tokens to stdout as they are generated; skip the
# prompt echo and special tokens. (TextStreamer takes no return_text kwarg.)
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

onnx_gen = pipeline("text-generation", model=model, tokenizer=tokenizer,
                    streamer=streamer, return_text=True)

# Simple REPL: read a prompt, generate, report rough throughput.
while True:
    text = input("\nBrian:")
    print("Bot:\n")
    t0 = time.time()
    gen = onnx_gen(text)
    elapsed = time.time() - t0
    generated = gen[0]["generated_text"]
    # Crude words/sec estimate based on whitespace-delimited tokens.
    print(elapsed, len(generated.split(" ")) / elapsed, "words /sec")


我在 Colab 上的量化模式示例代码:

from optimum.onnxruntime import ORTStableDiffusionXLImg2ImgPipeline , ORTStableDiffusionXLPipeline,ORTModelForCausalLM
from transformers import AutoTokenizer
from optimum.onnxruntime import ORTOptimizer
from optimum.onnxruntime.configuration import OptimizationConfig
from optimum.onnxruntime.configuration import AutoQuantizationConfig
from optimum.onnxruntime import ORTQuantizer
import os,torch
from pathlib import Path
# Google-Drive base directory where quantized models are written.
Gbase = "/gdrive/MyDrive/onnx/"

# Default model to quantize and its output directory.
model_checkpoint = "gpt2"
# NOTE(review): this resolves to /gdrive/MyDrive/onnx/onnx/gpt2_arm64
# (doubled "onnx") — kept as in the original; confirm the path is intended.
save_directory = Gbase + "onnx/gpt2_arm64"

# Checkpoints to batch-quantize; each is saved under Gbase/<repo name>.
tasks = [
    'TinyLlama/TinyLlama-1.1B-Chat-v0.6',
    'pankajmathur/orca_mini_3b',
    'Fredithefish/RedPajama-INCITE-Chat-3B-Instruction-Tuning-with-GPT-4',
    'CobraMamba/mamba-gpt-3b-v4',
    'WizardLM/WizardCoder-3B-V1.0',
    'GeneZC/MiniChat-3B',
]




#TinyLlama/TinyLlama-1.1B-Chat-v0.6
#pankajmathur/orca_mini_3b
#Fredithefish/RedPajama-INCITE-Chat-3B-Instruction-Tuning-with-GPT-4
#CobraMamba/mamba-gpt-3b-v4
#WizardLM/WizardCoder-3B-V1.0
#GeneZC/MiniChat-3B

def quantModel(model_checkpoint=model_checkpoint, save_directory=save_directory):
    """Export *model_checkpoint* to ONNX and write a dynamically quantized
    copy (plus tokenizer files) into *save_directory*.

    Uses ARM64 dynamic (weight-only) quantization so the result runs well
    on phones and small ARM boards.
    """
    # Save the tokenizer next to the quantized model so the directory is
    # directly loadable with AutoTokenizer.from_pretrained().
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
    tokenizer.save_pretrained(save_directory)

    # export=True converts the PyTorch checkpoint to ONNX on the fly.
    ort_model = ORTModelForCausalLM.from_pretrained(model_checkpoint, export=True)

    # NOTE(review): the original built an ORTOptimizer/OptimizationConfig here
    # but never ran them; that dead code has been removed. Re-add an explicit
    # optimizer.optimize(...) call if graph optimization is actually wanted.
    qconfig = AutoQuantizationConfig.arm64(is_static=False, per_channel=False)
    quantizer = ORTQuantizer.from_pretrained(ort_model)
    # use_external_data_format=True keeps models larger than 2 GB valid ONNX.
    quantizer.quantize(save_dir=save_directory,
                       quantization_config=qconfig,
                       use_external_data_format=True)

def doTasks(tasks=tasks, Gbase=Gbase):
    """Quantize every checkpoint in *tasks*, saving each under Gbase/<name>.

    Failures are printed and skipped so one bad model does not abort the
    whole batch run.
    """
    for model_checkpoint in tasks:
        # e.g. 'GeneZC/MiniChat-3B' -> <Gbase>/MiniChat-3B
        save_directory = os.path.join(Gbase, Path(model_checkpoint).name)
        try:
            quantModel(model_checkpoint=model_checkpoint,
                       save_directory=save_directory)
        except Exception:  # narrowed from bare except: keep Ctrl-C working
            import traceback
            traceback.print_exc()


doTasks()

在 Termux 上安装 Debian 或 Ubuntu,并配置一个相对完善的开发环境。 不保证始终有效,安装后别忘了备份。 下载并安装 Termux 和 Termux Widget,Termux API,将 Termux-Widget 添加到您的主屏幕

https://f-droid.org/en/packages/com.termux/ https://f-droid.org/en/packages/com.termux.widget/ https://f-droid.org/packages/com.termux.api/

打开 Termux,安装 proot-distro(如遇安装失败,反复执行 termux-change-repo 更换镜像源,直到能够安装为止),然后在 proot-distro 中安装 Debian。复制以下全部命令并逐步运行:由于整个过程较为复杂,必须分步完成,这里只整理了命令本身,不再逐一描述过程。重装系统非常耗时,各个环节难以一次做到完美;我尝试了几次才最终实现所需功能,完整安装大约需要一到两小时。参考:http://blog.brianwoo.net/2023/06/termux-debian-vscode-llm-latest-working.html

####Debian:

# --- Termux side: storage access, wake lock, base packages, proot-distro ---
termux-setup-storage
termux-wake-lock
termux-change-repo
pkg install tur-repo
pkg update && pkg upgrade
pkg install git proot-distro termimage vim termux-api

# Home-screen shortcuts for Termux:Widget (root shell and user shell).
disname='debian'
user='brian'
echo "proot-distro login $disname" > .shortcuts/debianai.sh
echo "proot-distro login debian --user $user" > .shortcuts/debian.sh
proot-distro install debian
proot-distro login debian

# --- Inside the Debian proot: locale + an unprivileged sudo user ---
user='brian'
apt update && apt upgrade
apt install sudo locales
echo "LANG=zh_CN.UTF-8" >> /etc/locale.conf
sudo locale-gen
adduser $user
gpasswd -a $user sudo
# NOTE(review): appending to /etc/sudoers directly; visudo would be safer.
echo "$user   ALL=(ALL:ALL) ALL" >> /etc/sudoers
login $user





# --- Inside Debian as $user: toolchain, BLAS, llama.cpp, Python venv ---
# Convenience env file: activate the venv + cargo and cd to the work dir.
echo 'source llm/bin/activate && source .cargo/env &&  cd /sdcard/Documents/Pydroid3/llm' > llm.env
sudo locale-gen zh_CN.UTF-8
sudo apt update && apt upgrade
sudo apt install python3-full git curl vim wget
sudo apt install python-is-python3
sudo apt install python3-pip
sudo apt install clang wget git cmake
sudo apt install opencl-headers
sudo apt install libopenblas-dev libopenblas0 libopenblas64-0 libblis-openmp-dev
sudo apt install python3-torch python3-torchaudio python3-torchtext python3-torchvision
sudo apt install libideep-dev libtorch-dev libonnx1
sudo apt install pandoc build-essential
sudo apt install libopenblas-dev libopenblas-openmp-dev libopenblas0 libopenblas64-0-openmp libopenblas64-dev libopenblas64-openmp-dev
sudo apt install opencl-c-headers opencl-clhpp-headers libasl-dev libasl0 libclblast-dev libclc-13

# Rust toolchain (needed by some Python packages built from source).
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

# BLIS: optimized BLAS for ARM, built with CBLAS interface + OpenMP threads.
cd
git clone https://github.com/flame/blis
cd blis
./configure --enable-cblas -t openmp auto
make -j
sudo make install

# llama.cpp built against BLIS. Fixed: cd home first so the clone lands in
# $HOME rather than inside blis/ (the original omitted this cd).
cd
rm -rf CLBlast/
rm -rf llama.cpp
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp
make LLAMA_BLIS=1 -j -B

# Python virtual environment for all LLM tooling.
cd
python -m venv llm
source llm/bin/activate

# ctransformers + llama-cpp-python rebuilt from source with BLAS enabled.
pip uninstall ctransformers llama-cpp-python
rm -rf ctransformers
git clone --recurse-submodules https://github.com/marella/ctransformers
cd ctransformers
bash ./scripts/build.sh
cd
python -m pip install ./ctransformers/
CMAKE_ARGS="-DLLAMA_BLAS=ON" pip install llama-cpp-python

# Python packages (exact-duplicate lines from the original removed).
pip install --upgrade pip
pip install --upgrade diffusers[torch]
pip install --upgrade accelerate peft openvino optimum onnx onnxruntime nncf
pip install opencv-python fastapi uvicorn flask
pip install fastapi python-multipart pydantic sqlalchemy opencc-python-reimplemented pandas
pip install fast-sentence-transformers
pip install langchain
pip install wikipedia unstructured pypdf pdf2image pdfminer chromadb qdrant-client lark momento annoy
pip install doc2text pypandoc pandoc
# Fixed: the original line had a stray 'install' argument after 'eager'.
pip install --upgrade-strategy eager optimum[onnxruntime]
pip install optimum-intel
pip install --upgrade-strategy eager optimum[openvino,nncf]
pip install --force-reinstall transformers[torch]
pip install --force-reinstall diffusers[torch]

####Ubuntu :


# --- Termux side: storage access, wake lock, base packages, proot-distro ---
termux-setup-storage
termux-wake-lock
termux-change-repo
pkg install tur-repo
pkg update && pkg upgrade
pkg install git proot-distro termimage vim termux-api

# Home-screen shortcuts for Termux:Widget (root shell and user shell).
# Fixed: the original wrote the root-shell shortcut to debianai.sh — a
# copy-paste leftover from the Debian section.
disname='ubuntu'
user='brian'
echo "proot-distro login $disname" > .shortcuts/ubuntuai.sh
echo "proot-distro login ubuntu --user $user" > .shortcuts/ubuntu.sh
proot-distro install ubuntu
proot-distro login ubuntu

# --- Inside the Ubuntu proot: locale + an unprivileged sudo user ---
user='brian'
apt update && apt upgrade
apt install sudo locales adduser
echo "LANG=zh_CN.UTF-8" >> /etc/locale.conf
sudo locale-gen
adduser $user
sudo gpasswd -a $user sudo
# NOTE(review): appending to /etc/sudoers directly; visudo would be safer.
echo "$user   ALL=(ALL:ALL) ALL" >> /etc/sudoers
login $user





# --- Inside Ubuntu as $user: toolchain, BLAS, llama.cpp, Python venv ---
# Convenience env file: activate the venv + cargo and cd to the work dir.
echo 'source llm/bin/activate && source .cargo/env &&  cd /sdcard/Documents/Pydroid3/llm' > llm.env
sudo locale-gen zh_CN.UTF-8
sudo apt update && apt upgrade
sudo apt install python3-full git curl vim wget
sudo apt install python-is-python3
sudo apt install python3-pip
sudo apt install clang wget git cmake
sudo apt install opencl-headers
sudo apt install libopenblas-dev libopenblas0 libopenblas64-0 libblis-openmp-dev
sudo apt install python3-torch python3-torchaudio python3-torchtext python3-torchvision
sudo apt install libideep-dev libtorch-dev libonnx1
sudo apt install pandoc build-essential
sudo apt install libopenblas-dev libopenblas-openmp-dev libopenblas0 libopenblas64-0-openmp libopenblas64-dev libopenblas64-openmp-dev
sudo apt install opencl-c-headers opencl-clhpp-headers libasl-dev libasl0 libclblast-dev libclc-13

# Rust toolchain (needed by some Python packages built from source).
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

# BLIS: optimized BLAS for ARM, built with CBLAS interface + OpenMP threads.
cd
git clone https://github.com/flame/blis
cd blis
./configure --enable-cblas -t openmp auto
make -j
sudo make install

# llama.cpp built against BLIS. Fixed: cd home first so the clone lands in
# $HOME rather than inside blis/ (the original omitted this cd).
cd
rm -rf CLBlast/
rm -rf llama.cpp
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp
make LLAMA_BLIS=1 -j -B

# Python virtual environment for all LLM tooling.
cd
python -m venv llm
source llm/bin/activate

# ctransformers + llama-cpp-python rebuilt from source with BLAS enabled.
pip uninstall ctransformers llama-cpp-python
rm -rf ctransformers
git clone --recurse-submodules https://github.com/marella/ctransformers
cd ctransformers
bash ./scripts/build.sh
cd
python -m pip install ./ctransformers/
CMAKE_ARGS="-DLLAMA_BLAS=ON" pip install llama-cpp-python

# Python packages (exact-duplicate lines from the original removed).
pip install --upgrade pip
pip install --upgrade diffusers[torch]
pip install --upgrade accelerate peft openvino optimum onnx onnxruntime nncf
pip install opencv-python fastapi uvicorn flask
pip install fastapi python-multipart pydantic sqlalchemy opencc-python-reimplemented pandas
pip install fast-sentence-transformers
pip install langchain
pip install wikipedia unstructured pypdf pdf2image pdfminer chromadb qdrant-client lark momento annoy
pip install doc2text pypandoc pandoc
# Fixed: the original line had a stray 'install' argument after 'eager'.
pip install --upgrade-strategy eager optimum[onnxruntime]
pip install optimum-intel
pip install --upgrade-strategy eager optimum[openvino,nncf]
pip install --force-reinstall transformers[torch]
pip install --force-reinstall diffusers[torch]

brianwoo/GPT2-Onnx-Quantized

作者 brianwoo

text-generation transformers
↓ 1 ♥ 0

创建时间: 2023-12-05 21:42:11+00:00

更新时间: 2023-12-07 04:55:00+00:00

在 Hugging Face 上查看

文件 (10)

.gitattributes
README.md
config.json
merges.txt
model_quantized.onnx ONNX
ort_config.json
special_tokens_map.json
tokenizer.json
tokenizer_config.json
vocab.json