注意事项
39.91M · 2026-03-23
欢迎关注公众号:爱学习的妮妮qiang
# 1. Create the service group and user required by the Ascend NPU driver
sudo groupadd HwHiAiUser
sudo useradd -g HwHiAiUser -d /home/HwHiAiUser -m -s /bin/bash HwHiAiUser
# 2. Install the NPU driver (takes a few minutes)
sudo ./Ascend-hdk-310p-npu-driver_25.5.1_linux-aarch64.run --full --install-for-all --force
reboot # reboot so the kernel driver is loaded
npu-smi info # verify the driver installation
# 3. Install the NPU firmware
sudo ./Ascend-hdk-310p-npu-firmware_7.8.0.6.201.run --full
reboot
# 4. Install the CANN toolkit
sudo ./Ascend-cann_8.5.0_linux-aarch64.run --install
# 5. Install the optimized operator package
sudo ./Ascend-cann-310p-ops_8.5.0_linux-aarch64.run --install
# 6. Set the toolkit environment variables (current shell, and persist for future logins)
source /usr/local/Ascend/ascend-toolkit/set_env.sh
echo 'source /usr/local/Ascend/ascend-toolkit/set_env.sh' >> ~/.bashrc
# 7. Verify the full stack: device visibility and the Python ACL binding
npu-smi info
python3 -c "import acl; print('ACL Load OK')"
# Pull the TEI (text-embeddings-inference) image on an internet-connected machine
docker pull swr.cn-south-1.myhuaweicloud.com/ascendhub/mis-tei:7.3.0-300I-Duo-aarch64
# Export the image to a tarball for offline transfer
docker save swr.cn-south-1.myhuaweicloud.com/ascendhub/mis-tei:7.3.0-300I-Duo-aarch64 -o mis-tei.tar
# Load the image in the customer's (offline) environment
docker load -i mis-tei.tar
# Download the Qwen3-Embedding-8B model (e.g. via ModelScope; steps omitted here)
# 1. Write the container entrypoint script /data/models/start_tei.sh
#!/bin/bash
# start_tei.sh — launch one text-embeddings-router worker bound to a single NPU.
# Environment inputs (set by `docker run -e`): PORT, UDS_PATH, ASCEND_VISIBLE_DEVICES.
source /usr/local/Ascend/ascend-toolkit/set_env.sh
# Preload libgomp to avoid OpenMP symbol conflicts inside the container.
export LD_PRELOAD=/usr/local/lib/python3.11/site-packages/scikit_learn.libs/libgomp-d22c30c5.so.1.0.0:/usr/lib/aarch64-linux-gnu/libgomp.so.1
export PYTHONPATH=/usr/local/Ascend/mxRag-7.3.0/ops/transformer_adapter:$PYTHONPATH
export RAG_SDK_HOME=/usr/local/Ascend/mxRag-7.3.0
export LD_LIBRARY_PATH=/usr/local/Ascend/nnal/atb/8.5.0/atb/cxx_abi_0/lib:/usr/local/Ascend/mxRag-7.3.0/ops/lib:$LD_LIBRARY_PATH
# Defaults keep the script usable standalone (single worker on device 0, port 8080).
PORT=${PORT:-8080}
UDS_PATH=${UDS_PATH:-/tmp/text-embeddings-inference-server}
DEVICE_ID=${ASCEND_VISIBLE_DEVICES:-0}
export ASCEND_VISIBLE_DEVICES=$DEVICE_ID
export NPU_VISIBLE_DEVICES=$DEVICE_ID
export RANK=0
export LOCAL_RANK=0
# exec replaces the shell so the router is PID 1 and receives container signals.
# NOTE: the line continuations below are required — without them only the first
# line runs and every --flag line is executed as a separate (broken) command.
exec /home/HwHiAiUser/.cargo/bin/text-embeddings-router \
  --model-id /home/HwHiAiUser/model/ \
  --hostname 0.0.0.0 \
  --port "$PORT" \
  --uds-path "$UDS_PATH" \
  --auto-truncate \
  --dtype float16 \
  --max-batch-tokens 16384
# 2. Container launch script: 16 containers over 8 cards (two workers per card).
#    Monitor NPU utilization to confirm each card can sustain two processes.
# First remove any leftovers from a previous run (errors for missing containers are expected).
for i in $(seq 0 15); do
  docker stop "tei-$i" 2>/dev/null
  docker rm "tei-$i" 2>/dev/null
done
for i in $(seq 0 15); do
  DEVICE=$((i % 8))          # map container i onto NPU card (i mod 8)
  PORT=$((8080 + i))         # unique HTTP port per container: 8080..8095
  UDS="/tmp/tei-server-$i"   # unique unix-socket path per container
  # NOTE: the trailing backslashes are required — without them each option line
  # would be executed as its own shell command and docker run would fail.
  docker run -u root -d \
    -e ENABLE_BOOST=True \
    -e TEI_NPU_DEVICE="$DEVICE" \
    -e PORT="$PORT" \
    -e UDS_PATH="$UDS" \
    --name="tei-$i" \
    --net=host \
    --privileged \
    --device=/dev/davinci_manager \
    --device=/dev/hisi_hdc \
    --device=/dev/devmm_svm \
    --device=/dev/davinci"$DEVICE" \
    -v /usr/local/Ascend/driver:/usr/local/Ascend/driver:ro \
    -v /usr/local/sbin:/usr/local/sbin:ro \
    -v /data/models/Qwen3-Embedding-8B:/home/HwHiAiUser/model \
    -v /data/models/start_tei.sh:/start_tei.sh \
    -v /etc/hostname:/etc/hostname:ro \
    -v /etc/hosts:/etc/hosts:ro \
    --entrypoint /bin/bash \
    swr.cn-south-1.myhuaweicloud.com/ascendhub/mis-tei:7.3.0-300I-Duo-aarch64 \
    /start_tei.sh
  echo "Started tei-$i on Device=$DEVICE PORT=$PORT"
  sleep 5   # stagger startup so model loads don't all hit the disk/NPU at once
done
# 1. Pull the nginx image (via a mirror registry)
docker pull docker.1ms.run/nginx:latest
# 2. Write the nginx config: least-connections load balancing over the 16 TEI workers.
mkdir -p /data/nginx/logs
cat > /data/nginx/tei.conf << 'EOF'
upstream tei_cluster {
least_conn;
server 10.100.122.3:8080;
server 10.100.122.3:8081;
server 10.100.122.3:8082;
server 10.100.122.3:8083;
server 10.100.122.3:8084;
server 10.100.122.3:8085;
server 10.100.122.3:8086;
server 10.100.122.3:8087;
server 10.100.122.3:8088;
server 10.100.122.3:8089;
server 10.100.122.3:8090;
server 10.100.122.3:8091;
server 10.100.122.3:8092;
server 10.100.122.3:8093;
server 10.100.122.3:8094;
server 10.100.122.3:8095;
keepalive 32;
}
server {
listen 8000;
location / {
# Forward to the upstream pool; this target was missing in the original config.
proxy_pass http://tei_cluster;
# HTTP/1.1 with an empty Connection header is required for upstream keepalive.
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_read_timeout 120s;
proxy_connect_timeout 5s;
proxy_send_timeout 120s;
}
location /nginx_status {
stub_status on;
allow 127.0.0.1;
deny all;
}
}
EOF
# 3. Start the nginx container (host networking so it can reach the TEI ports directly).
# NOTE: the trailing backslashes are required — the original was missing them,
# so each option line would have been run as a separate shell command.
docker run -d \
  --name=nginx-tei \
  --net=host \
  -v /data/nginx/tei.conf:/etc/nginx/conf.d/tei.conf:ro \
  -v /data/nginx/logs:/var/log/nginx \
  docker.1ms.run/nginx:latest
# Verification: compare the embedding vectors produced in the dev environment
# against the customer environment for the same inputs.
# Measured difference is at the 4th decimal place — negligible in practice.
# The request goes through the nginx front port (8000) to the TEI /embed endpoint;
# the original command was missing the URL entirely.
curl -X POST http://127.0.0.1:8000/embed \
  -H "Content-Type: application/json" \
  -d '{"inputs": ["hello", "你好"]}'