Swift Examples - Gemini

Service setup

Create a Gemini service in the AIProxy dashboard

Follow the integration guide, selecting the Gemini icon on the 'Create a New Service' form.
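
Each snippet below constructs the Gemini service inline, with both setup options commented out. Uncommented, the standard production setup looks like this, using the two values from your AIProxy developer dashboard:

import AIProxy

let geminiService = AIProxy.geminiService(
    partialKey: "partial-key-from-your-developer-dashboard",
    serviceURL: "service-url-from-your-developer-dashboard"
)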

How to generate text content with Gemini

import AIProxy

/* Uncomment for BYOK use cases */
// let geminiService = AIProxy.geminiDirectService(
//     unprotectedAPIKey: "your-gemini-key"
// )

/* Uncomment for all other production use cases */
// let geminiService = AIProxy.geminiService(
//     partialKey: "partial-key-from-your-developer-dashboard",
//     serviceURL: "service-url-from-your-developer-dashboard"
// )

let requestBody = GeminiGenerateContentRequestBody(
    contents: [
        .init(
            parts: [.text("How do I use product xyz?")]
        )
    ],
    generationConfig: .init(maxOutputTokens: 1024),
    systemInstruction: .init(parts: [.text("Introduce yourself as a customer support person")])
)
do {
    let response = try await geminiService.generateContentRequest(
        body: requestBody,
        model: "gemini-2.0-flash-exp"
    )
    for part in response.candidates?.first?.content?.parts ?? [] {
        switch part {
        case .text(let text):
            print("Gemini sent: \(text)")
        case .functionCall(name: let functionName, args: let arguments):
            print("Gemini wants us to call function \(functionName) with arguments: \(arguments ?? [:])")
        }
    }
    if let usage = response.usageMetadata {
        print(
            """
            Used:
                \(usage.promptTokenCount ?? 0) prompt tokens
                \(usage.cachedContentTokenCount ?? 0) cached tokens
                \(usage.candidatesTokenCount ?? 0) candidate tokens
                \(usage.totalTokenCount ?? 0) total tokens
            """
        )
    }
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not create Gemini generate content request: \(error.localizedDescription)")
}
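
Gemini's generate content endpoint is stateless, so to continue a conversation you resend the earlier turns in contents, alternating the user role with Gemini's model role. Here is a minimal follow-up sketch that reuses the request body type above; the model turn's text is a placeholder standing in for Gemini's previous reply:

let followUpRequestBody = GeminiGenerateContentRequestBody(
    contents: [
        .init(
            parts: [.text("How do I use product xyz?")],
            role: "user"
        ),
        .init(
            parts: [.text("<Gemini's previous reply goes here>")],
            role: "model"
        ),
        .init(
            parts: [.text("Does product xyz work offline?")],
            role: "user"
        )
    ],
    systemInstruction: .init(parts: [.text("Introduce yourself as a customer support person")])
)

Send followUpRequestBody through the same generateContentRequest call shown above.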

How to make a tool call with Gemini

import AIProxy

/* Uncomment for BYOK use cases */
// let geminiService = AIProxy.geminiDirectService(
//     unprotectedAPIKey: "your-gemini-key"
// )

/* Uncomment for all other production use cases */
// let geminiService = AIProxy.geminiService(
//     partialKey: "partial-key-from-your-developer-dashboard",
//     serviceURL: "service-url-from-your-developer-dashboard"
// )

let functionParameters: [String: AIProxyJSONValue] = [
    "type": "OBJECT",
    "properties": [
        "brightness": [
            "description": "Light level from 0 to 100. Zero is off and 100 is full brightness.",
            "type": "NUMBER"
        ],
        "colorTemperature": [
            "description": "Color temperature of the light fixture which can be `daylight`, `cool` or `warm`.",
            "type": "STRING"
        ]
    ],
    "required": [
        "brightness",
        "colorTemperature"
    ]
]

let requestBody = GeminiGenerateContentRequestBody(
    contents: [
        .init(
            parts: [.text("Dim the lights so the room feels cozy and warm.")],
            role: "user"
        )
    ],
    /* Uncomment this to enforce that a function is called regardless of prompt contents. */
    // toolConfig: .init(
    //     functionCallingConfig: .init(
    //         allowedFunctionNames: ["controlLight"],
    //         mode: .anyFunction
    //     )
    // ),
    tools: [
        .functionDeclarations(
            [
                .init(
                    name: "controlLight",
                    description: "Set the brightness and color temperature of a room light.",
                    parameters: functionParameters
                )
            ]
        )
    ]
)

do {
    let response = try await geminiService.generateContentRequest(
        body: requestBody,
        model: "gemini-2.0-flash-exp"
    )
    for part in response.candidates?.first?.content?.parts ?? [] {
        switch part {
        case .text(let text):
            print("Gemini sent: \(text)")
        case .functionCall(name: let functionName, args: let arguments):
            print("Gemini wants us to call function \(functionName) with arguments: \(arguments ?? [:])")
        }
    }
    if let usage = response.usageMetadata {
        print(
            """
            Used:
                \(usage.promptTokenCount ?? 0) prompt tokens
                \(usage.cachedContentTokenCount ?? 0) cached tokens
                \(usage.candidatesTokenCount ?? 0) candidate tokens
                \(usage.totalTokenCount ?? 0) total tokens
            """
        )
    }
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not create Gemini tool (function) call: \(error.localizedDescription)")
}
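
Gemini only requests the function call; your app is responsible for running the matching Swift code. Below is a minimal dispatch sketch for the controlLight declaration above. The ControlLightArgs struct and handleFunctionCall helper are illustrative, not part of AIProxy, and the sketch assumes AIProxyJSONValue is Codable (it appears in both request and response bodies), so the arguments can be round-tripped through Foundation's JSON coders:

import AIProxy
import Foundation

// Our own model of the parameters declared for controlLight. Not part of AIProxy.
struct ControlLightArgs: Decodable {
    let brightness: Double
    let colorTemperature: String
}

// Call this from the .functionCall case in the loop above, e.g.
// handleFunctionCall(name: functionName, arguments: arguments)
func handleFunctionCall(name: String, arguments: [String: AIProxyJSONValue]?) {
    guard name == "controlLight" else {
        print("Gemini asked for an unknown function: \(name)")
        return
    }
    do {
        // Round-trip the loosely typed arguments into a strongly typed struct.
        let data = try JSONEncoder().encode(arguments ?? [:])
        let args = try JSONDecoder().decode(ControlLightArgs.self, from: data)
        print("Set light to \(args.brightness) brightness with \(args.colorTemperature) color temperature")
    } catch {
        print("Could not decode controlLight arguments: \(error.localizedDescription)")
    }
}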

How to make a search grounding call with Gemini

To use this feature, you must connect a GCP billing account to your Gemini API key; otherwise, Gemini returns a 429 for every call. You can connect a billing account to the Google Cloud project that backs your API key.

Consider applying to Google for Startups to gain credits that you can put toward Gemini.

import AIProxy

/* Uncomment for BYOK use cases */
// let geminiService = AIProxy.geminiDirectService(
//     unprotectedAPIKey: "your-gemini-key"
// )

/* Uncomment for all other production use cases */
// let geminiService = AIProxy.geminiService(
//     partialKey: "partial-key-from-your-developer-dashboard",
//     serviceURL: "service-url-from-your-developer-dashboard"
// )

let requestBody = GeminiGenerateContentRequestBody(
    contents: [
        .init(
            parts: [.text("What is the price of Google stock today")],
            role: "user"
        )
    ],
    tools: [
        .googleSearchRetrieval(.init(dynamicThreshold: 0.7, mode: .dynamic))
    ]
)
do {
    let response = try await geminiService.generateContentRequest(
        body: requestBody,
        model: "gemini-1.5-flash"
    )
    for part in response.candidates?.first?.content?.parts ?? [] {
        switch part {
        case .text(let text):
            print("Gemini sent: \(text)")
        case .functionCall(name: let functionName, args: let arguments):
            print("Gemini wants us to call function \(functionName) with arguments: \(arguments ?? [:])")
        }
    }
    if let usage = response.usageMetadata {
        print(
            """
            Used:
                \(usage.promptTokenCount ?? 0) prompt tokens
                \(usage.cachedContentTokenCount ?? 0) cached tokens
                \(usage.candidatesTokenCount ?? 0) candidate tokens
                \(usage.totalTokenCount ?? 0) total tokens
            """
        )
    }
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not create Gemini grounding search request: \(error.localizedDescription)")
}

How to transcribe audio with Gemini

Add a file called helloworld.m4a to your Xcode assets before running this sample snippet:

import AIProxy

/* Uncomment for BYOK use cases */
// let geminiService = AIProxy.geminiDirectService(
//     unprotectedAPIKey: "your-gemini-key"
// )

/* Uncomment for all other production use cases */
// let geminiService = AIProxy.geminiService(
//     partialKey: "partial-key-from-your-developer-dashboard",
//     serviceURL: "service-url-from-your-developer-dashboard"
// )

guard let url = Bundle.main.url(forResource: "helloworld", withExtension: "m4a") else {
    print("Could not find an audio file named helloworld.m4a in your app bundle")
    return
}

do {
    let requestBody = GeminiGenerateContentRequestBody(
        contents: [
            .init(
                parts: [
                    .text("""
                            Can you transcribe this interview, in the format of timecode, speaker, caption?
                            Use speaker A, speaker B, etc. to identify speakers.
                            """),
                    .inline(data: try Data(contentsOf: url), mimeType: "audio/mp4")
                ]
            )
        ]
    )
    let response = try await geminiService.generateContentRequest(
        body: requestBody,
        model: "gemini-1.5-flash"
    )
    for part in response.candidates?.first?.content?.parts ?? [] {
        switch part {
        case .text(let text):
            print("Gemini transcript: \(text)")
        case .functionCall(name: let functionName, args: let arguments):
            print("Gemini wants us to call function \(functionName) with arguments: \(arguments ?? [:])")
        }
    }
    if let usage = response.usageMetadata {
        print(
            """
            Used:
                \(usage.promptTokenCount ?? 0) prompt tokens
                \(usage.cachedContentTokenCount ?? 0) cached tokens
                \(usage.candidatesTokenCount ?? 0) candidate tokens
                \(usage.totalTokenCount ?? 0) total tokens
            """
        )
    }
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not create transcript with Gemini: \(error.localizedDescription)")
}
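
If you want the transcript as a single string instead of printing each part, you can collect the text parts inside the same do block, right after the response is received:

// Join the text parts of the first candidate into one transcript string.
let transcript = (response.candidates?.first?.content?.parts ?? [])
    .compactMap { part -> String? in
        if case .text(let text) = part {
            return text
        }
        return nil
    }
    .joined(separator: "\n")
print("Full transcript:\n\(transcript)")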

How to use images in the prompt to Gemini

Add a file called my-image.jpg to your Xcode assets, then run this snippet:

import AIProxy

/* Uncomment for BYOK use cases */
// let geminiService = AIProxy.geminiDirectService(
//     unprotectedAPIKey: "your-gemini-key"
// )

/* Uncomment for all other production use cases */
// let geminiService = AIProxy.geminiService(
//     partialKey: "partial-key-from-your-developer-dashboard",
//     serviceURL: "service-url-from-your-developer-dashboard"
// )

// This example loads the asset with NSImage (AppKit). On iOS, use UIImage(named:) instead.
guard let image = NSImage(named: "my-image") else {
    print("Could not find an image named 'my-image' in your app assets")
    return
}

guard let jpegData = AIProxy.encodeImageAsJpeg(image: image, compressionQuality: 0.6) else {
    print("Could not encode image as Jpeg")
    return
}

do {
    let requestBody = GeminiGenerateContentRequestBody(
        contents: [
            .init(
                parts: [
                    .text("What do you see?"),
                    .inline(
                        data: jpegData,
                        mimeType: "image/jpeg"
                    )
                ]
            )
        ],
        safetySettings: [
            .init(category: .dangerousContent, threshold: .none),
            .init(category: .civicIntegrity, threshold: .none),
            .init(category: .harassment, threshold: .none),
            .init(category: .hateSpeech, threshold: .none),
            .init(category: .sexuallyExplicit, threshold: .none)
        ]
    )
    let response = try await geminiService.generateContentRequest(
        body: requestBody,
        model: "gemini-1.5-flash"
    )
    for part in response.candidates?.first?.content?.parts ?? [] {
        switch part {
        case .text(let text):
            print("Gemini sees: \(text)")
        case .functionCall(name: let functionName, args: let arguments):
            print("Gemini wants us to call function \(functionName) with arguments: \(arguments ?? [:])")
        }
    }
    if let usage = response.usageMetadata {
        print(
            """
            Used:
                \(usage.promptTokenCount ?? 0) prompt tokens
                \(usage.cachedContentTokenCount ?? 0) cached tokens
                \(usage.candidatesTokenCount ?? 0) candidate tokens
                \(usage.totalTokenCount ?? 0) total tokens
            """
        )
    }
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not create Gemini generate content request: \(error.localizedDescription)")
}

How to upload a video file to Gemini temporary storage

Add a file called my-movie.mov to your Xcode assets before running this sample snippet. If you use a file like my-movie.mp4, change the mime type from video/quicktime to video/mp4 in the snippet below.

import AIProxy

/* Uncomment for BYOK use cases */
// let geminiService = AIProxy.geminiDirectService(
//     unprotectedAPIKey: "your-gemini-key"
// )

/* Uncomment for all other production use cases */
// let geminiService = AIProxy.geminiService(
//     partialKey: "partial-key-from-your-developer-dashboard",
//     serviceURL: "service-url-from-your-developer-dashboard"
// )

// Load the movie data from your Xcode assets
guard let movieAsset = NSDataAsset(name: "my-movie") else {
    print("""
            Drop my-movie.mov into Assets first.
            """)
    return
}

do {
    let geminiFile = try await geminiService.uploadFile(
        fileData: movieAsset.data,
        mimeType: "video/quicktime"
    )
    print("""
            Video file uploaded to Gemini's media storage.
            It will be available for 48 hours.
            Find it at \(geminiFile.uri.absoluteString)
            """)
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not upload file to Gemini: \(error.localizedDescription)")
}
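
The mime type passed to uploadFile must match the movie you bundle. If your app handles more than one movie format, a small helper can pick the right value. The helper below is an illustration, not an AIProxy API, and covers only the two formats mentioned in this section:

// Map a movie file extension to the mime type expected by Gemini file uploads.
func mimeType(forMovieExtension ext: String) -> String? {
    switch ext.lowercased() {
    case "mov":
        return "video/quicktime"
    case "mp4":
        return "video/mp4"
    default:
        return nil
    }
}

For example, mimeType(forMovieExtension: "mp4") returns "video/mp4", which you would pass as the mimeType argument of uploadFile.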

How to convert video contents to text with Gemini

Use the file URL returned from the snippet above.

import AIProxy

let fileURL = URL(string: "url-from-snippet-above")!

/* Uncomment for BYOK use cases */
// let geminiService = AIProxy.geminiDirectService(
//     unprotectedAPIKey: "your-gemini-key"
// )

/* Uncomment for all other production use cases */
// let geminiService = AIProxy.geminiService(
//     partialKey: "partial-key-from-your-developer-dashboard",
//     serviceURL: "service-url-from-your-developer-dashboard"
// )

let requestBody = GeminiGenerateContentRequestBody(
    model: "gemini-1.5-flash",
    contents: [
        .init(
            parts: [
                .text("Dump the text content in markdown from this video"),
                .file(
                    url: fileURL,
                    mimeType: "video/quicktime"
                )
            ]
        )
    ],
    safetySettings: [
        .init(category: .dangerousContent, threshold: .none),
        .init(category: .civicIntegrity, threshold: .none),
        .init(category: .harassment, threshold: .none),
        .init(category: .hateSpeech, threshold: .none),
        .init(category: .sexuallyExplicit, threshold: .none)
    ]
)

do {
    let response = try await geminiService.generateContentRequest(
        body: requestBody,
        model: "gemini-1.5-flash"
    )
    for part in response.candidates?.first?.content?.parts ?? [] {
        switch part {
        case .text(let text):
            print("Gemini transcript: \(text)")
        case .functionCall(name: let functionName, args: let arguments):
            print("Gemini wants us to call function \(functionName) with arguments: \(arguments ?? [:])")
        }
    }
    if let usage = response.usageMetadata {
        print(
            """
            Used:
                \(usage.promptTokenCount ?? 0) prompt tokens
                \(usage.cachedContentTokenCount ?? 0) cached tokens
                \(usage.candidatesTokenCount ?? 0) candidate tokens
                \(usage.totalTokenCount ?? 0) total tokens
            """
        )
    }
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not create Gemini vision request: \(error.localizedDescription)")
}

How to delete a temporary file from Gemini storage

import AIProxy

let fileURL = URL(string: "url-from-snippet-above")!

/* Uncomment for BYOK use cases */
// let geminiService = AIProxy.geminiDirectService(
//     unprotectedAPIKey: "your-gemini-key"
// )

/* Uncomment for all other production use cases */
// let geminiService = AIProxy.geminiService(
//     partialKey: "partial-key-from-your-developer-dashboard",
//     serviceURL: "service-url-from-your-developer-dashboard"
// )

do {
    try await geminiService.deleteFile(fileURL: fileURL)
    print("File deleted from \(fileURL.absoluteString)")
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    print("Received \(statusCode) status code with response body: \(responseBody)")
} catch {
    print("Could not delete file from Gemini temporary storage: \(error.localizedDescription)")
}