Node.js Usage with the OpenAI SDK
// Install dependency: npm install openai
import OpenAI from "openai";

// Client for the IoTeX AI gateway, which exposes an OpenAI-compatible API.
// Replace "your-api-key" with a real key (keep it server-side in production).
const GATEWAY_BASE_URL = "https://gateway.iotex.ai/v1";

const openai = new OpenAI({
    apiKey: "your-api-key",
    baseURL: GATEWAY_BASE_URL
});

// Simple conversation
/**
 * Sends a single user message and resolves with the assistant's reply text.
 *
 * @param {string} prompt - User message to send to the model.
 * @returns {Promise<string>} Content of the first completion choice.
 */
async function simpleChat(prompt) {
    const request = {
        messages: [{ role: "user", content: prompt }],
        model: "gemini-2.5-flash",
        max_tokens: 1000
    };

    const completion = await openai.chat.completions.create(request);
    const [firstChoice] = completion.choices;
    return firstChoice.message.content;
}

// Streaming output
/**
 * Streams a chat completion, writing each content delta to stdout as it
 * arrives instead of waiting for the full response.
 *
 * @param {string} prompt - User message to send to the model.
 * @returns {Promise<void>} Resolves once the stream is exhausted.
 */
async function streamChat(prompt) {
    const stream = await openai.chat.completions.create({
        model: "gemini-2.5-flash",
        messages: [{ role: "user", content: prompt }],
        stream: true,
    });

    for await (const chunk of stream) {
        const delta = chunk.choices[0]?.delta?.content;
        process.stdout.write(delta || "");
    }
}

// Usage examples (top-level await — requires an ES module context).
const quicksortAnswer = await simpleChat("Write a quicksort algorithm in JavaScript");
console.log(quicksortAnswer);

console.log("\nStreaming example:");
await streamChat("Explain React Hooks usage");

Browser Usage with Fetch API

This example includes the API key directly in client-side code for demonstration purposes only. Never expose your API key in production frontend code. Instead, create a backend API proxy that forwards requests to the gateway.
/**
 * Streams a chat completion from the gateway over fetch, invoking `onChunk`
 * for each piece of generated text as it arrives (Server-Sent Events).
 *
 * NOTE: the API key is inlined for demonstration only — in production, proxy
 * requests through a backend so the key never reaches client-side code.
 *
 * @param {string} prompt - User message to send to the model.
 * @param {(content: string) => void} onChunk - Called with each content delta.
 * @returns {Promise<void>} Resolves when the stream ends or [DONE] is seen.
 * @throws {Error} If the HTTP request fails or the response has no body.
 */
async function streamChat(prompt, onChunk) {
    const response = await fetch("https://gateway.iotex.ai/v1/chat/completions", {
        method: "POST",
        headers: {
            "Authorization": "Bearer your-api-key",
            "Content-Type": "application/json"
        },
        body: JSON.stringify({
            model: "gemini-2.5-flash",
            messages: [{ role: "user", content: prompt }],
            stream: true
        })
    });

    if (!response.ok || !response.body) {
        const errorText = await response.text();
        throw new Error(`Streaming request failed (${response.status}): ${errorText}`);
    }

    // Emits every `data:` payload in one SSE event; returns false once the
    // server signals end-of-stream with the [DONE] sentinel.
    const processEvent = (event) => {
        for (const line of event.split("\n")) {
            if (!line.startsWith("data: ")) continue;

            const payload = line.slice(6).trim();
            if (payload === "[DONE]") return false;

            try {
                const content = JSON.parse(payload).choices[0]?.delta?.content;
                if (content) {
                    onChunk(content);
                }
            } catch (e) {
                console.warn("Skipping invalid SSE payload:", e);
            }
        }
        return true;
    };

    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";

    try {
        while (true) {
            const { value, done } = await reader.read();
            if (done) break;

            buffer += decoder.decode(value, { stream: true });
            // SSE events are separated by a blank line; the trailing fragment
            // (possibly an incomplete event) stays buffered for the next read.
            const events = buffer.split("\n\n");
            buffer = events.pop() || "";

            for (const event of events) {
                if (!processEvent(event)) return;
            }
        }

        // BUG FIX: the original flushed the decoder into `buffer` but never
        // processed it, silently dropping the final SSE event whenever the
        // stream ended without a trailing blank line.
        buffer += decoder.decode();
        if (buffer.trim() !== "") {
            processEvent(buffer);
        }
    } finally {
        // Release the stream lock even when bailing out early on [DONE].
        reader.releaseLock();
    }
}

// Usage example: append each streamed chunk to the page as it arrives.
// (textContent is safe against HTML injection, unlike innerHTML.)
// BUG FIX: the original left the promise floating — any network/HTTP error
// became an unhandled rejection. Attach a rejection handler.
streamChat("Explain what machine learning is", (content) => {
    document.getElementById("output").textContent += content;
}).catch((err) => {
    console.error("Streaming failed:", err);
});