QANATIX
Integrations

OpenAI (GPT)

Use QANATIX as a function calling tool with GPT-4o.

OpenAI Integration

Use QANATIX as a function calling tool with GPT-4o or any OpenAI model.

Define the tool

# JSON-Schema parameter spec for the qanatix_search tool, kept as its own
# module-level constant so the tool list below stays readable.
_search_parameters = {
    "type": "object",
    "properties": {
        "vertical": {
            "type": "string",
            "description": "Data vertical to search, e.g. 'manufacturing', 'pharma'",
        },
        "query": {
            "type": "string",
            "description": "Natural language search query",
        },
        "limit": {
            "type": "integer",
            "description": "Max results to return",
            "default": 5,
        },
    },
    # 'limit' is optional — the model may omit it and the default applies.
    "required": ["vertical", "query"],
}

# Tool definition in the OpenAI function-calling format.
tools = [
    {
        "type": "function",
        "function": {
            "name": "qanatix_search",
            "description": "Search verified enterprise data. Returns ranked results from the user's private database — not web content.",
            "parameters": _search_parameters,
        },
    }
]

Handle the tool call

import json
import os

import httpx
from openai import OpenAI

# OpenAI client; picks up OPENAI_API_KEY from the environment.
client = OpenAI()

# QANATIX API configuration.
QANATIX_URL = "https://api.qanatix.com/api/v1"
# SECURITY: never hard-code a live API key in source or docs. Read it from
# the environment; the literal fallback keeps this snippet copy-pasteable
# and preserves the original behavior when the variable is unset.
QANATIX_KEY = os.environ.get("QANATIX_KEY", "sk_live_abc123...")

def call_qanatix(vertical: str, query: str, limit: int = 5) -> dict:
    """Run a QANATIX search and return the decoded JSON response.

    Args:
        vertical: Data vertical to search, e.g. 'manufacturing', 'pharma'.
        query: Natural language search query.
        limit: Maximum number of results to return.

    Returns:
        The parsed JSON body of the search response.

    Raises:
        httpx.HTTPStatusError: If the API responds with a 4xx/5xx status.
        httpx.TimeoutException: If the request exceeds the timeout.
    """
    resp = httpx.post(
        f"{QANATIX_URL}/search/{vertical}",
        headers={"Authorization": f"Bearer {QANATIX_KEY}"},
        # 'compact' requests the token-efficient markdown-table format.
        json={"query": query, "limit": limit, "format": "compact"},
        # Be explicit about the timeout so a stalled request fails fast
        # instead of relying on library defaults.
        timeout=30.0,
    )
    # Fail loudly on HTTP errors rather than JSON-decoding an error page.
    resp.raise_for_status()
    return resp.json()

# Chat with tool use
messages = [{"role": "user", "content": "Find M8 stainless bolts in stock"}]
response = client.chat.completions.create(
    model="gpt-4o",
    messages=messages,
    tools=tools,
)

# If GPT wants to call the tool. The model may request SEVERAL tool calls
# in a single turn; every tool_call_id must receive a matching "tool"
# message or the follow-up request is rejected, so handle them all.
if response.choices[0].message.tool_calls:
    # The assistant message carrying the tool calls must precede the
    # tool results in the conversation history.
    messages.append(response.choices[0].message)
    for tool_call in response.choices[0].message.tool_calls:
        # Arguments arrive as a JSON string, not a dict.
        args = json.loads(tool_call.function.arguments)
        result = call_qanatix(**args)
        # Send each result back, paired to its call via tool_call_id.
        messages.append({
            "role": "tool",
            "tool_call_id": tool_call.id,
            "content": json.dumps(result),
        })
    final = client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
    )
    print(final.choices[0].message.content)

Token optimization

Use "format": "compact" to get markdown table output that uses ~120 tokens per result instead of ~800 from raw JSON. This significantly reduces your GPT costs.

On this page