ONNX Model Library

Documentation

# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from pathlib import Path

import torch
from optimum.onnxruntime import ORTModelForCausalLM
from transformers import AutoConfig, AutoTokenizer, GenerationConfig

device_id = 0
device = torch.device(f"cuda:{device_id}")  # change to torch.device("cpu") if running on CPU

ep = "CUDAExecutionProvider"  # change to "CPUExecutionProvider" if running on CPU
ep_options = {"device_id": device_id}

model_id = "mistralai/Mistral-7B-Instruct-v0.2"
model_path = "./Olive/examples/llama2/models/qlora/qlora-conversion-transformers_optimization-bnb_quantization/gpu-cuda_model"

model_path = Path(model_path)

# Reuse the base model's config if the exported folder does not already contain one.
if not (model_path / "config.json").exists():
    config = AutoConfig.from_pretrained(model_id)
    config.save_pretrained(model_path)
else:
    config = AutoConfig.from_pretrained(model_path)

# Same for the generation config.
if not (model_path / "generation_config.json").exists():
    gen_config = GenerationConfig.from_pretrained(model_id)
    gen_config.save_pretrained(model_path)
else:
    gen_config = GenerationConfig.from_pretrained(model_path)

tokenizer = AutoTokenizer.from_pretrained(model_id)

model = ORTModelForCausalLM.from_pretrained(
    model_path,
    config=config,
    generation_config=gen_config,
    use_io_binding=True,
    provider=ep,
    provider_options=ep_options,
)
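
With the session created, the model can be driven through the standard transformers generate() interface that ORTModelForCausalLM exposes. A minimal sketch follows; the prompt string and max_new_tokens value are illustrative and not part of the original example.

# Run a short generation to sanity-check the exported model (illustrative prompt and settings).
prompt = "[INST] Summarize what ONNX Runtime is in one sentence. [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(device)

# ORTModelForCausalLM implements the same generate() API as transformers models.
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))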

Wanclouds/Mistral-7b-doc-ONNX

Author: Wanclouds

Tags: text-generation, transformers
Downloads: 0 · Likes: 0

Created: 2024-01-17 20:53:53+00:00

Updated: 2024-02-23 10:28:17+00:00

View on Hugging Face

Files (8)

.gitattributes
.ipynb_checkpoints/config-checkpoint.json
README.md
config.json
decoder.onnx
generation_config.json
model.onnx
model.onnx.data
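
To pull these files into a local folder before pointing the loading script above at them, the repository can be fetched from the Hugging Face Hub. A minimal sketch, assuming the Wanclouds/Mistral-7b-doc-ONNX repository is publicly accessible; the target directory name is illustrative:

from huggingface_hub import snapshot_download

# Download the full repository (config.json, generation_config.json, *.onnx, *.onnx.data).
local_dir = snapshot_download(repo_id="Wanclouds/Mistral-7b-doc-ONNX", local_dir="./mistral-7b-doc-onnx")
print("Files downloaded to:", local_dir)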