Swift Examples - OpenAI
Service setup
Create an OpenAI service in the AIProxy dashboard
Follow the integration guide, selecting the OpenAI icon on the 'Create a New Service' form.
Get a non-streaming chat completion from OpenAI
import AIProxy
/* Uncomment for BYOK use cases */
// let openAIService = AIProxy.openAIDirectService(
// unprotectedAPIKey: "your-openai-key"
// )
/* Uncomment for all other production use cases */
// let openAIService = AIProxy.openAIService(
// partialKey: "partial-key-from-your-developer-dashboard",
// serviceURL: "service-url-from-your-developer-dashboard"
// )
do {
let response = try await openAIService.chatCompletionRequest(body: .init(
model: "gpt-4o",
messages: [.user(content: .text("hello world"))]
))
print(response.choices.first?.message.content ?? "")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
print("Could not create OpenAI chat completion: \(error.localizedDescription)")
}
Get a streaming chat completion from OpenAI
import AIProxy
/* Uncomment for BYOK use cases */
// let openAIService = AIProxy.openAIDirectService(
// unprotectedAPIKey: "your-openai-key"
// )
/* Uncomment for all other production use cases */
// let openAIService = AIProxy.openAIService(
// partialKey: "partial-key-from-your-developer-dashboard",
// serviceURL: "service-url-from-your-developer-dashboard"
// )
let requestBody = OpenAIChatCompletionRequestBody(
model: "gpt-4o-mini",
messages: [.user(content: .text("hello world"))]
)
do {
let stream = try await openAIService.streamingChatCompletionRequest(body: requestBody)
for try await chunk in stream {
print(chunk.choices.first?.delta.content ?? "")
}
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
print("Could not create OpenAI streaming chat completion: \(error.localizedDescription)")
}
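If you want the complete response text rather than printing each chunk, you can accumulate the deltas as they arrive. A minimal sketch, reusing the requestBody and openAIService from the snippet above:

var accumulatedText = ""
do {
    let stream = try await openAIService.streamingChatCompletionRequest(body: requestBody)
    for try await chunk in stream {
        // Each chunk carries a small delta of the assistant's message; append it.
        accumulatedText += chunk.choices.first?.delta.content ?? ""
    }
    print("Complete response: \(accumulatedText)")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not accumulate the streaming completion: \(error.localizedDescription)")
}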
How to include history in chat completion requests to OpenAI
Use this approach to have a conversation with ChatGPT. All previous chat messages, whether issued by the user or the assistant (ChatGPT), are fed back into the model on each request.
import AIProxy
/* Uncomment for BYOK use cases */
// let openAIService = AIProxy.openAIDirectService(
// unprotectedAPIKey: "your-openai-key"
// )
/* Uncomment for all other production use cases */
// let openAIService = AIProxy.openAIService(
// partialKey: "partial-key-from-your-developer-dashboard",
// serviceURL: "service-url-from-your-developer-dashboard"
// )
// We'll start the conversation by asking about the color of a blackberry.
// There is no prior history, so we only send up a single user message.
//
// You can optionally include a .system message to give the model
// instructions on how it should behave.
let userMessage1: OpenAIChatCompletionRequestBody.Message = .user(
content: .text("What color is a blackberry?")
)
// Create the first chat completion.
var completion1: OpenAIChatCompletionResponseBody? = nil
do {
completion1 = try await openAIService.chatCompletionRequest(body: .init(
model: "gpt-4o-mini",
messages: [
userMessage1
]
))
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received non-200 status code: \(statusCode) with response body: \(responseBody)")
} catch {
print("Could not get first chat completion: \(error.localizedDescription)")
}
// Get the contents of the model's first response:
guard let assistantContent1 = completion1?.choices.first?.message.content else {
print("Completion1: ChatGPT did not respond with any assistant content")
return
}
print("Completion1: \(assistantContent1)")
// Continue the conversation by asking about a strawberry.
// If the history were absent from the request, ChatGPT would respond with general facts.
// By including the history, the model continues the conversation, understanding that we
// are specifically interested in the strawberry's color.
let userMessage2: OpenAIChatCompletionRequestBody.Message = .user(
content: .text("And a strawberry?")
)
// Create the second chat completion, note the `messages` array.
var completion2: OpenAIChatCompletionResponseBody? = nil
do {
completion2 = try await openAIService.chatCompletionRequest(body: .init(
model: "gpt-4o-mini",
messages: [
userMessage1,
.assistant(content: .text(assistantContent1)),
userMessage2
]
))
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received non-200 status code: \(statusCode) with response body: \(responseBody)")
} catch {
print("Could not get second chat completion: \(error.localizedDescription)")
}
// Get the contents of the model's second response:
guard let assistantContent2 = completion2?.choices.first?.message.content else {
print("Completion2: ChatGPT did not respond with any assistant content")
return
}
print("Completion2: \(assistantContent2)")
Send a multi-modal chat completion request to OpenAI
On macOS, use NSImage(named:) in place of UIImage(named:)
import AIProxy
/* Uncomment for BYOK use cases */
// let openAIService = AIProxy.openAIDirectService(
// unprotectedAPIKey: "your-openai-key"
// )
/* Uncomment for all other production use cases */
// let openAIService = AIProxy.openAIService(
// partialKey: "partial-key-from-your-developer-dashboard",
// serviceURL: "service-url-from-your-developer-dashboard"
// )
guard let image = UIImage(named: "myImage") else {
print("Could not find an image named 'myImage' in your app assets")
return
}
guard let imageURL = AIProxy.encodeImageAsURL(image: image, compressionQuality: 0.6) else {
print("Could not convert image to OpenAI's imageURL format")
return
}
do {
let response = try await openAIService.chatCompletionRequest(body: .init(
model: "gpt-4o",
messages: [
.system(
content: .text("Tell me what you see")
),
.user(
content: .parts(
[
.text("What do you see?"),
.imageURL(imageURL, detail: .auto)
]
)
)
]
))
print(response.choices.first?.message.content ?? "")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
print("Could not create OpenAI multi-modal chat completion: \(error.localizedDescription)")
}
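On macOS, per the note at the top of this example, only the image loading changes. A minimal sketch of the equivalent lines (this assumes encodeImageAsURL accepts an NSImage on macOS, as the note implies):

guard let image = NSImage(named: "myImage") else {
    print("Could not find an image named 'myImage' in your app assets")
    return
}
guard let imageURL = AIProxy.encodeImageAsURL(image: image, compressionQuality: 0.6) else {
    print("Could not convert image to OpenAI's imageURL format")
    return
}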
How to generate an image with DALLE
This snippet will print out the URL of an image generated with dall-e-3:
import AIProxy
/* Uncomment for BYOK use cases */
// let openAIService = AIProxy.openAIDirectService(
// unprotectedAPIKey: "your-openai-key"
// )
/* Uncomment for all other production use cases */
// let openAIService = AIProxy.openAIService(
// partialKey: "partial-key-from-your-developer-dashboard",
// serviceURL: "service-url-from-your-developer-dashboard"
// )
do {
let requestBody = OpenAICreateImageRequestBody(
prompt: "a skier",
model: "dall-e-3"
)
let response = try await openAIService.createImageRequest(body: requestBody)
print(response.data.first?.url ?? "")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
print("Could not generate an image with OpenAI's DALLE: \(error.localizedDescription)")
}
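The endpoint returns a temporary URL rather than the image bytes, so fetch the result yourself if you want to display or store it. A small helper you could add for that (the helper name is illustrative; on macOS use NSImage(data:) in place of UIImage(data:)):

import UIKit

// Downloads the generated image from the URL printed above.
func downloadGeneratedImage(from urlString: String) async throws -> UIImage? {
    guard let url = URL(string: urlString) else {
        return nil
    }
    let (data, _) = try await URLSession.shared.data(from: url)
    return UIImage(data: data)
}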
How to ensure OpenAI returns JSON as the chat message content
If you need to enforce a strict JSON contract, please use Structured Outputs (the next example) instead of this approach. This approach is referred to as 'JSON mode' in the OpenAI docs, and is the predecessor to Structured Outputs.
JSON mode is enabled with responseFormat: .jsonObject, while Structured Outputs is enabled with responseFormat: .jsonSchema.
If you use JSON mode, set responseFormat and specify in the prompt that OpenAI should return JSON only:
import AIProxy
/* Uncomment for BYOK use cases */
// let openAIService = AIProxy.openAIDirectService(
// unprotectedAPIKey: "your-openai-key"
// )
/* Uncomment for all other production use cases */
// let openAIService = AIProxy.openAIService(
// partialKey: "partial-key-from-your-developer-dashboard",
// serviceURL: "service-url-from-your-developer-dashboard"
// )
do {
let requestBody = OpenAIChatCompletionRequestBody(
model: "gpt-4o",
messages: [
.system(content: .text("Return valid JSON only")),
.user(content: .text("Return alice and bob in a list of names"))
],
responseFormat: .jsonObject
)
let response = try await openAIService.chatCompletionRequest(body: requestBody)
print(response.choices.first?.message.content ?? "")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
print("Could not create OpenAI chat completion in JSON mode: \(error.localizedDescription)")
}
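The message content in JSON mode is still a string, so parse it before using it. JSON mode guarantees syntactically valid JSON but not any particular keys, which is why Structured Outputs is the better fit for strict contracts. A minimal sketch that could sit inside the do block above, where response is in scope:

// Parse the JSON string returned as the message content.
// The top-level keys are whatever the model chose; no schema is enforced.
if let jsonString = response.choices.first?.message.content,
   let jsonData = jsonString.data(using: .utf8),
   let parsed = try? JSONSerialization.jsonObject(with: jsonData) {
    print("Parsed JSON: \(parsed)")
}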
How to use OpenAI structured outputs (JSON schemas) in a chat response
This example prompts ChatGPT to construct a color palette and to conform to a strict JSON schema in its response:
import AIProxy
/* Uncomment for BYOK use cases */
// let openAIService = AIProxy.openAIDirectService(
// unprotectedAPIKey: "your-openai-key"
// )
/* Uncomment for all other production use cases */
// let openAIService = AIProxy.openAIService(
// partialKey: "partial-key-from-your-developer-dashboard",
// serviceURL: "service-url-from-your-developer-dashboard"
// )
do {
let schema: [String: AIProxyJSONValue] = [
"type": "object",
"properties": [
"colors": [
"type": "array",
"items": [
"type": "object",
"properties": [
"name": [
"type": "string",
"description": "A descriptive name to give the color"
],
"hex_code": [
"type": "string",
"description": "The hex code of the color"
]
],
"required": ["name", "hex_code"],
"additionalProperties": false
]
]
],
"required": ["colors"],
"additionalProperties": false
]
let requestBody = OpenAIChatCompletionRequestBody(
model: "gpt-4o-2024-08-06",
messages: [
.system(content: .text("Return valid JSON only, and follow the specified JSON structure")),
.user(content: .text("Return a peaches and cream color palette"))
],
responseFormat: .jsonSchema(
name: "palette_creator",
description: "A list of colors that make up a color pallete",
schema: schema,
strict: true
)
)
let response = try await openAIService.chatCompletionRequest(body: requestBody)
print(response.choices.first?.message.content ?? "")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
print("Could not create OpenAI chat completion with structured outputs: \(error.localizedDescription)")
}
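Because strict is set to true, the returned content is guaranteed to conform to the schema above, so you can decode it directly into Codable types that mirror it. A minimal sketch (the struct names are illustrative); the decoding would sit inside the do block above, where response is in scope:

// Swift mirror of the JSON schema defined above.
struct Palette: Decodable {
    struct Color: Decodable {
        let name: String
        let hexCode: String

        enum CodingKeys: String, CodingKey {
            case name
            case hexCode = "hex_code"
        }
    }
    let colors: [Color]
}

if let content = response.choices.first?.message.content,
   let data = content.data(using: .utf8) {
    let palette = try JSONDecoder().decode(Palette.self, from: data)
    for color in palette.colors {
        print("\(color.name): \(color.hexCode)")
    }
}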
How to use OpenAI structured outputs with a function call
This implements the example in OpenAI's new function calling guide.
For more examples, see the original structured outputs announcement.
import AIProxy
/* Uncomment for BYOK use cases */
// let openAIService = AIProxy.openAIDirectService(
// unprotectedAPIKey: "your-openai-key"
// )
/* Uncomment for all other production use cases */
// let openAIService = AIProxy.openAIService(
// partialKey: "partial-key-from-your-developer-dashboard",
// serviceURL: "service-url-from-your-developer-dashboard"
// )
func getWeather(location: String?) -> String {
// Fill this with your native function logic.
// Using a stub for this example.
return "Sunny and 65 degrees"
}
// We'll start the conversation by asking about the weather.
// There is no prior history, so we only send up a single user message.
//
// You can optionally include a .system message to give the model
// instructions on how it should behave.
let userMessage: OpenAIChatCompletionRequestBody.Message = .user(
content: .text("What is the weather in SF?")
)
var completion1: OpenAIChatCompletionResponseBody? = nil
do {
completion1 = try await openAIService.chatCompletionRequest(body: .init(
model: "gpt-4o-mini",
messages: [
userMessage
],
tools: [
.function(
name: "get_weather",
description: "Get current temperature for a given location.",
parameters: [
"type": "object",
"properties": [
"location": [
"type": "string",
"description": "City and country e.g. Bogotá, Colombia"
]
],
"required": ["location"],
"additionalProperties": false
],
strict: true
)
]
))
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received non-200 status code: \(statusCode) with response body: \(responseBody)")
} catch {
print("Could not get first chat completion: \(error.localizedDescription)")
}
// Get the contents of the model's first response:
guard let toolCall = completion1?.choices.first?.message.toolCalls?.first else {
print("Completion1: ChatGPT did not respond with a tool call")
return
}
// Invoke the function call natively.
guard toolCall.function.name == "get_weather" else {
print("We only know how to get the weather")
return
}
let weather = getWeather(location: toolCall.function.arguments?["location"] as? String)
// Pass the results of the function call back to OpenAI.
// We create a second chat completion, note the `messages` array in
// the completion request. It passes back up:
// 1. the original user message
// 2. the response from the assistant, which told us to call the get_weather function
// 3. the result of our `getWeather` invocation
let toolMessage: OpenAIChatCompletionRequestBody.Message = .tool(
content: .text(weather),
toolCallID: toolCall.id
)
var completion2: OpenAIChatCompletionResponseBody? = nil
do {
completion2 = try await openAIService.chatCompletionRequest(
body: .init(
model: "gpt-4o-mini",
messages: [
userMessage,
.assistant(
toolCalls: [
.init(
id: toolCall.id,
function: .init(
name: toolCall.function.name,
arguments: toolCall.function.argumentsRaw
)
)
]
),
toolMessage
]
)
)
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received non-200 status code: \(statusCode) with response body: \(responseBody)")
} catch {
print("Could not get second chat completion: \(error.localizedDescription)")
}
// Get the contents of the model's second response:
guard let assistantContent2 = completion2?.choices.first?.message.content else {
print("Completion2: ChatGPT did not respond with any assistant content")
return
}
print(assistantContent2)
// Prints: "The weather in San Francisco is sunny with a temperature of 65 degrees Fahrenheit."
How to stream structured outputs tool calls with OpenAI
This example is taken from OpenAI's function calling guide.
import AIProxy
/* Uncomment for BYOK use cases */
// let openAIService = AIProxy.openAIDirectService(
// unprotectedAPIKey: "your-openai-key"
// )
/* Uncomment for all other production use cases */
// let openAIService = AIProxy.openAIService(
// partialKey: "partial-key-from-your-developer-dashboard",
// serviceURL: "service-url-from-your-developer-dashboard"
// )
let requestBody = OpenAIChatCompletionRequestBody(
model: "gpt-4o-mini",
messages: [
.user(content: .text("What is the weather like in Paris today?")),
],
tools: [
.function(
name: "get_weather",
description: "Get current temperature for a given location.",
parameters: [
"type": "object",
"properties": [
"location": [
"type": "string",
"description": "City and country e.g. Bogotá, Colombia"
],
],
"required": ["location"],
"additionalProperties": false
],
strict: true
),
]
)
do {
let stream = try await openAIService.streamingChatCompletionRequest(body: requestBody)
for try await chunk in stream {
guard let delta = chunk.choices.first?.delta else {
continue
}
// If the model decided to call a function, this branch will be entered:
if let toolCall = delta.toolCalls?.first {
if let functionName = toolCall.function?.name {
print("ChatGPT wants to call function \(functionName) with arguments...")
}
print(toolCall.function?.arguments ?? "")
}
// If the model decided to chat, this branch will be entered:
if let content = delta.content {
print(content)
}
}
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received non-200 status code: \(statusCode) with response body: \(responseBody)")
} catch {
print("Could not make a streaming tool call to OpenAI: \(error.localizedDescription)")
}
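Tool-call arguments arrive as string fragments spread across chunks, so accumulate them before parsing if you intend to invoke the function. A minimal sketch that collects the function name and arguments from the stream (this assumes a single tool call per response, as in this example, and that the streamed arguments field is a string fragment, as the print above implies):

var streamedFunctionName: String? = nil
var streamedArguments = ""
do {
    let stream = try await openAIService.streamingChatCompletionRequest(body: requestBody)
    for try await chunk in stream {
        guard let toolCall = chunk.choices.first?.delta.toolCalls?.first else {
            continue
        }
        if let functionName = toolCall.function?.name {
            streamedFunctionName = functionName
        }
        streamedArguments += toolCall.function?.arguments ?? ""
    }
    print("Call \(streamedFunctionName ?? "unknown") with arguments: \(streamedArguments)")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received non-200 status code: \(statusCode) with response body: \(responseBody)")
} catch {
    print("Could not stream the tool call: \(error.localizedDescription)")
}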
How to get Whisper word-level timestamps in an audio transcription
- Record an audio file in QuickTime and save it as "helloworld.m4a"
- Add the audio file to your Xcode project. Make sure it's included in your target: select the audio file in the project tree, press cmd-opt-0 to open the inspector panel, and confirm 'Target Membership' includes your app target
- Run this snippet:
import AIProxy
/* Uncomment for BYOK use cases */
// let openAIService = AIProxy.openAIDirectService(
// unprotectedAPIKey: "your-openai-key"
// )
/* Uncomment for all other production use cases */
// let openAIService = AIProxy.openAIService(
// partialKey: "partial-key-from-your-developer-dashboard",
// serviceURL: "service-url-from-your-developer-dashboard"
// )
do {
let url = Bundle.main.url(forResource: "helloworld", withExtension: "m4a")!
let requestBody = OpenAICreateTranscriptionRequestBody(
file: try Data(contentsOf: url),
model: "whisper-1",
responseFormat: "verbose_json",
timestampGranularities: [.word, .segment]
)
let response = try await openAIService.createTranscriptionRequest(body: requestBody)
if let words = response.words {
for word in words {
print("\(word.word) from \(word.start) to \(word.end)")
}
}
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
print("Could not get word-level timestamps from OpenAI: \(error.localizedDescription)")
}
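If you prefer a single readable transcript rather than one line per word, you can fold the words array into a string. A minimal sketch that could sit inside the do block above, where response is in scope:

// Build a word-by-word transcript annotated with start/end times.
if let words = response.words {
    let transcript = words
        .map { "[\($0.start)-\($0.end)] \($0.word)" }
        .joined(separator: " ")
    print(transcript)
}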
How to use OpenAI text-to-speech
import AIProxy
/* Uncomment for BYOK use cases */
// let openAIService = AIProxy.openAIDirectService(
// unprotectedAPIKey: "your-openai-key"
// )
/* Uncomment for all other production use cases */
// let openAIService = AIProxy.openAIService(
// partialKey: "partial-key-from-your-developer-dashboard",
// serviceURL: "service-url-from-your-developer-dashboard"
// )
do {
let requestBody = OpenAITextToSpeechRequestBody(
input: "Hello world",
voice: .nova
)
let mpegData = try await openAIService.createTextToSpeechRequest(body: requestBody)
// Do not use a local `let` or `var` for AVAudioPlayer.
// You need the lifecycle of the player to live beyond the scope of this function.
// Instead, use file scope or store the player on a long-lived reference type.
// For example, at the top of this file you may define:
//
// fileprivate var audioPlayer: AVAudioPlayer? = nil
//
// And then use the code below to play the TTS result:
audioPlayer = try AVAudioPlayer(data: mpegData)
audioPlayer?.prepareToPlay()
audioPlayer?.play()
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
print("Could not create OpenAI TTS audio: \(error.localizedDescription)")
}
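This snippet assumes import AVFoundation and the long-lived audioPlayer property described in the comments. On iOS, if the audio should play even when the ringer switch is silenced, you can also configure the shared audio session before calling play(); a minimal, optional sketch:

import AVFoundation

// Route TTS playback through the .playback category so it is audible
// with the silent switch on. Call this once before playing.
try AVAudioSession.sharedInstance().setCategory(.playback)
try AVAudioSession.sharedInstance().setActive(true)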
How to classify text and images as potentially harmful with OpenAI
import AIProxy
/* Uncomment for BYOK use cases */
// let openAIService = AIProxy.openAIDirectService(
// unprotectedAPIKey: "your-openai-key"
// )
/* Uncomment for all other production use cases */
// let openAIService = AIProxy.openAIService(
// partialKey: "partial-key-from-your-developer-dashboard",
// serviceURL: "service-url-from-your-developer-dashboard"
// )
let requestBody = OpenAIModerationRequestBody(
input: [
.text("is this bad"),
],
model: "omni-moderation-latest"
)
do {
let response = try await openAIService.moderationRequest(body: requestBody)
print("Is this content flagged: \(response.results.first?.flagged ?? false)")
//
// For a more detailed assessment of the input content, inspect:
//
// response.results.first?.categories
//
// and
//
// response.results.first?.categoryScores
//
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
print("Could not perform moderation request to OpenAI")
}
How to use OpenAI through an Azure deployment
You can use all of the OpenAI snippets above with one change. Initialize the OpenAI service with:
import AIProxy
let openAIService = AIProxy.openAIService(
partialKey: "partial-key-from-your-developer-dashboard",
serviceURL: "service-url-from-your-developer-dashboard",
requestFormat: .azureDeployment(apiVersion: "2024-06-01")
)
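After that, the Azure-backed service exposes the same methods as the standard OpenAI service. For example, the non-streaming chat completion from the top of this page works unchanged:

do {
    let response = try await openAIService.chatCompletionRequest(body: .init(
        model: "gpt-4o",
        messages: [.user(content: .text("hello world"))]
    ))
    print(response.choices.first?.message.content ?? "")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not create a chat completion through the Azure deployment: \(error.localizedDescription)")
}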