Qwen 3 Series

Qwen3 redefines open LLMs with switchable thinking modes and strong performance in code, math, and multilingual reasoning. Its flagship mixture-of-experts model activates only 22B of its 235B total parameters per token, balancing speed with depth of reasoning, and the series is fully open source, from lightweight dense models up to the 235B flagship.
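
A minimal sketch of toggling the thinking mode per request, assuming the backend forwards an enable_thinking flag passed through extra_body (the exact flag name and whether the gateway honors it depend on the deployment; check the AiHubMix documentation if it is ignored):

from openai import OpenAI

client = OpenAI(
    api_key="sk-***", # 🔑 Replace with your AiHubMix key
    base_url="https://aihubmix.com/v1",
)

# Assumption: the backend passes enable_thinking through to the model.
# False requests a fast, non-reasoning answer; omit it (or set True)
# to let the model reason before replying.
completion = client.chat.completions.create(
    model="Qwen/Qwen3-30B-A3B",
    messages=[
        {"role": "user", "content": "How many prime numbers are there below 50?"}
    ],
    extra_body={"enable_thinking": False}
)

print(completion.choices[0].message.content)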

1. Basic usage: Forward requests in the OpenAI-compatible format, as in the example below.
2. Tool call: Regular tools use the OpenAI-compatible format (see the sketch after the basic example), while MCP tools rely on qwen-agent and require installing the dependencies first with pip install -U qwen-agent mcp. For more details, refer to the official Alibaba (Qwen) documentation.

from openai import OpenAI

client = OpenAI(
    api_key="sk-***", # 🔑 Replace it by your AiHubMix Key
    base_url="https://aihubmix.com/v1",
)

completion = client.chat.completions.create(
    model="Qwen/Qwen3-30B-A3B",
    messages=[
        {
            "role": "user",
            "content": "Explain the Occam's Razor concept and provide everyday examples of it"
        }
    ],
    stream=True
)

for chunk in completion:
    # Each streamed chunk carries an incremental delta; print content tokens as they arrive
    if chunk.choices and chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
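
For the regular (non-MCP) tool calls mentioned in step 2, a standard OpenAI-style tools definition is enough. The sketch below is illustrative only: the get_weather function and its schema are not part of the AiHubMix API, and you still have to execute the tool yourself and send the result back in a follow-up message.

from openai import OpenAI
import json

client = OpenAI(
    api_key="sk-***", # 🔑 Replace with your AiHubMix key
    base_url="https://aihubmix.com/v1",
)

# Hypothetical tool schema, for illustration only
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a city",
            "parameters": {
                "type": "object",
                "properties": {
                    "city": {"type": "string", "description": "City name"}
                },
                "required": ["city"]
            }
        }
    }
]

completion = client.chat.completions.create(
    model="Qwen/Qwen3-30B-A3B",
    messages=[
        {"role": "user", "content": "What's the weather like in Hangzhou right now?"}
    ],
    tools=tools
)

# The model returns tool_calls when it decides to invoke a tool
for call in completion.choices[0].message.tool_calls or []:
    print(call.function.name, json.loads(call.function.arguments))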

Qwen 2.5 and QwQ/QvQ Series

Use the OpenAI-compatible format to forward requests; note that streaming calls need to extract chunk.choices[0].delta.content, as shown below.

1. QvQ, Qwen 2.5 VL: Image recognition, shown in the example below.
2. QwQ: Text tasks; a sketch follows the image example.

from openai import OpenAI
import base64
import os

client = OpenAI(
    api_key="sk-***", # 🔑 Replace it by your AiHubMix Key
    base_url="https://aihubmix.com/v1",
)

image_path = "yourpath/file.png"

def encode_image(image_path):
    if not os.path.exists(image_path):
        raise FileNotFoundError(f"Image file does not exist: {image_path}")
    
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

# Get the base64 encoding of the image
base64_image = encode_image(image_path)

completion = client.chat.completions.create(
    model="qwen2.5-vl-72b-instruct", #qwen2.5-vl-72b-instruct OR Qwen/QVQ-72B-Preview
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Please describe this image in detail"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/png;base64,{base64_image}"
                    }
                }
            ]
        }
    ],
    stream=True
)

for chunk in completion:
    # Each streamed chunk carries an incremental delta; print content tokens as they arrive
    if chunk.choices and chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
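
For QwQ text tasks (item 2 above), the request is a plain chat completion and the streaming extraction is identical to the image example. The model id below is an assumption; use whichever QwQ id is listed in the AiHubMix model catalog.

from openai import OpenAI

client = OpenAI(
    api_key="sk-***", # 🔑 Replace with your AiHubMix key
    base_url="https://aihubmix.com/v1",
)

completion = client.chat.completions.create(
    model="Qwen/QwQ-32B", # assumed id; check the model list on AiHubMix
    messages=[
        {"role": "user", "content": "A train travels 120 km in 1.5 hours. What is its average speed?"}
    ],
    stream=True
)

# Same extraction pattern as above: read delta.content from each streamed chunk
for chunk in completion:
    if chunk.choices and chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")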