Skip to main content
MacPaw OpenAI is a community-maintained Swift package for OpenAI APIs. It provides a modern, Swift-native interface with async/await support, making it perfect for iOS, macOS, watchOS, and tvOS applications.

Installation

Swift Package Manager

Add the package to your Package.swift:
dependencies: [
    .package(url: "https://github.com/MacPaw/OpenAI.git", from: "0.3.0")
]
Or add it via Xcode:
  1. File → Add Package Dependencies
  2. Enter: https://github.com/MacPaw/OpenAI.git
  3. Select the version and add to your target

Configuration

import OpenAI

// Point the client at Lumenfall's OpenAI-compatible gateway.
// Keys are prefixed "lmnfl_"; never ship a real key inside a client app —
// see the security note below.
let configuration = OpenAI.Configuration(
    token: "lmnfl_your_api_key",   // your Lumenfall API key
    host: "api.lumenfall.ai",      // gateway host (not api.openai.com)
    scheme: "https",
    path: "/openai/v1"             // OpenAI-compatible API prefix
)

let openAI = OpenAI(configuration: configuration)
Security consideration for mobile apps: API keys embedded in iOS, macOS, tvOS, or watchOS apps can be extracted by users through reverse engineering, jailbreaking, or debugging tools. Anyone with access to your API key can make requests at your expense. For production apps, consider:
  • Proxy through your backend: Route API calls through your own server that holds the API key securely
  • Per-user authentication: Issue individual API keys to authenticated users with appropriate rate limits
  • Usage monitoring: Set up alerts for unusual usage patterns in your Lumenfall dashboard

Using environment variables

// Read the key from the process environment (e.g. an Xcode scheme
// environment variable) instead of hard-coding it in source.
let configuration = OpenAI.Configuration(
    token: ProcessInfo.processInfo.environment["LUMENFALL_API_KEY"] ?? "",  // empty string when unset
    host: "api.lumenfall.ai",
    scheme: "https",
    path: "/openai/v1"
)

let openAI = OpenAI(configuration: configuration)

Generate images

import OpenAI

/// Generates a single image from a fixed text prompt and returns its URL string.
/// Returns an empty string when the response carries no image URL.
/// - Throws: any networking or API error surfaced by the OpenAI client.
func generateImage() async throws -> String {
    // Client configured for the Lumenfall OpenAI-compatible endpoint.
    let config = OpenAI.Configuration(
        token: "lmnfl_your_api_key",
        host: "api.lumenfall.ai",
        scheme: "https",
        path: "/openai/v1"
    )
    let client = OpenAI(configuration: config)

    // One 1024x1024 image for the given prompt.
    let imageQuery = ImagesQuery(
        prompt: "A serene mountain landscape at sunset with dramatic clouds",
        model: .other("gemini-3-pro-image"),
        n: 1,
        size: ._1024
    )

    let response = try await client.images(query: imageQuery)
    return response.data.first?.url ?? ""
}

Generation options

// DALL·E 3 query exercising the full set of generation options.
let query = ImagesQuery(
    prompt: "A beautiful garden with roses",
    model: .dall_e_3,
    n: 1,                    // number of images to generate
    quality: .hd,            // higher-quality rendering
    responseFormat: .url,    // hosted URL rather than inline base64
    size: ._1792_1024,  // landscape
    style: .natural          // rendering style
)

let result = try await openAI.images(query: query)

Available sizes

| Size | Dimensions |
| --- | --- |
| `._256` | 256x256 |
| `._512` | 512x512 |
| `._1024` | 1024x1024 |
| `._1024_1792` | 1024x1792 (portrait) |
| `._1792_1024` | 1792x1024 (landscape) |

Get base64 response

// Request raw image data inline instead of a hosted URL.
let query = ImagesQuery(
    prompt: "A cute robot painting",
    model: .other("gpt-image-1.5"),
    n: 1,
    responseFormat: .b64_json   // return base64-encoded image bytes
)

let result = try await openAI.images(query: query)
let base64Image = result.data.first?.b64Json   // base64 string, or nil if absent

Edit images

import OpenAI
import Foundation

/// Edits a local image ("original.png") according to a text prompt and
/// returns the URL string of the edited result.
/// Returns an empty string when the response carries no URL.
/// - Throws: file-reading errors, plus any networking or API error.
func editImage() async throws -> String {
    let config = OpenAI.Configuration(
        token: "lmnfl_your_api_key",
        host: "api.lumenfall.ai",
        scheme: "https",
        path: "/openai/v1"
    )
    let client = OpenAI(configuration: config)

    // Read the source image from disk.
    let sourceData = try Data(contentsOf: URL(fileURLWithPath: "original.png"))

    let editQuery = ImageEditsQuery(
        image: sourceData,
        fileName: "original.png",
        prompt: "Add a rainbow in the sky",
        model: .other("gpt-image-1.5"),
        n: 1,
        size: ._1024
    )

    let response = try await client.imageEdits(query: editQuery)
    return response.data.first?.url ?? ""
}

With a mask

// Source image to edit.
let imageURL = URL(fileURLWithPath: "original.png")
let imageData = try Data(contentsOf: imageURL)

// Mask restricting which regions the edit may repaint
// (presumably transparent areas mark editable regions — confirm
// against the image-edits API documentation).
let maskURL = URL(fileURLWithPath: "mask.png")
let maskData = try Data(contentsOf: maskURL)

let query = ImageEditsQuery(
    image: imageData,
    fileName: "original.png",
    mask: maskData,
    maskFileName: "mask.png",
    prompt: "A sunlit indoor lounge area with a pool",
    model: .other("gpt-image-1.5"),
    n: 1,
    size: ._1024
)

let result = try await openAI.imageEdits(query: query)

SwiftUI example

import SwiftUI
import OpenAI

/// A minimal SwiftUI screen that turns a text prompt into a generated image.
struct ImageGeneratorView: View {
    @State private var prompt = ""
    @State private var imageURL: URL?
    @State private var isLoading = false
    @State private var errorMessage: String?

    private let openAI: OpenAI

    init() {
        // NOTE: a raw key embedded here ships inside the app binary;
        // production apps should proxy requests through a backend instead.
        let configuration = OpenAI.Configuration(
            token: "lmnfl_your_api_key",
            host: "api.lumenfall.ai",
            scheme: "https",
            path: "/openai/v1"
        )
        self.openAI = OpenAI(configuration: configuration)
    }

    var body: some View {
        VStack(spacing: 20) {
            TextField("Enter your prompt", text: $prompt)
                .textFieldStyle(.roundedBorder)
                .padding(.horizontal)

            Button(action: generateImage) {
                if isLoading {
                    ProgressView()
                } else {
                    Text("Generate Image")
                }
            }
            .disabled(prompt.isEmpty || isLoading)
            .buttonStyle(.borderedProminent)

            if let url = imageURL {
                AsyncImage(url: url) { phase in
                    switch phase {
                    case .success(let image):
                        image
                            .resizable()
                            .aspectRatio(contentMode: .fit)
                    case .failure:
                        Text("Failed to load image")
                    case .empty:
                        ProgressView()
                    @unknown default:
                        EmptyView()
                    }
                }
                .frame(maxWidth: .infinity, maxHeight: 400)
            }

            if let error = errorMessage {
                Text(error)
                    .foregroundColor(.red)
            }
        }
        .padding()
    }

    /// Kicks off an async generation request and publishes the result
    /// (or an error message) back on the main actor.
    private func generateImage() {
        isLoading = true
        errorMessage = nil

        Task {
            do {
                let query = ImagesQuery(
                    prompt: prompt,
                    model: .other("gemini-3-pro-image"),
                    n: 1,
                    size: ._1024
                )

                let result = try await openAI.images(query: query)

                await MainActor.run {
                    if let urlString = result.data.first?.url,
                       let url = URL(string: urlString) {
                        imageURL = url
                    } else {
                        // FIX: previously a missing or malformed URL failed
                        // silently — the spinner stopped with no image and no
                        // explanation. Surface it to the user instead.
                        errorMessage = "The response did not contain an image URL."
                    }
                    isLoading = false
                }
            } catch {
                await MainActor.run {
                    errorMessage = error.localizedDescription
                    isLoading = false
                }
            }
        }
    }
}

iOS App with ViewModel

ViewModel

import Foundation
import OpenAI

/// Drives image generation for SwiftUI views; all published state is
/// mutated on the main actor.
@MainActor
class ImageGeneratorViewModel: ObservableObject {
    // State consumed by the view layer.
    @Published var imageURL: URL?
    @Published var isLoading = false
    @Published var errorMessage: String?

    private let openAI: OpenAI

    init() {
        // Read the key from the environment so it never lives in source.
        self.openAI = OpenAI(configuration: OpenAI.Configuration(
            token: ProcessInfo.processInfo.environment["LUMENFALL_API_KEY"] ?? "",
            host: "api.lumenfall.ai",
            scheme: "https",
            path: "/openai/v1"
        ))
    }

    /// Requests one 1024x1024 image for `prompt` and publishes its URL,
    /// or an error message on failure.
    func generateImage(prompt: String, model: String = "gemini-3-pro-image") async {
        isLoading = true
        errorMessage = nil
        defer { isLoading = false }   // cleared on every exit path

        let query = ImagesQuery(
            prompt: prompt,
            model: .other(model),
            n: 1,
            size: ._1024
        )

        do {
            let result = try await openAI.images(query: query)
            if let urlString = result.data.first?.url,
               let url = URL(string: urlString) {
                imageURL = url
            }
        } catch let error as OpenAIError {
            errorMessage = message(for: error)
        } catch {
            errorMessage = error.localizedDescription
        }
    }

    /// Maps library errors to user-facing strings.
    private func message(for error: OpenAIError) -> String {
        switch error {
        case .apiError(let apiError):
            return "API Error: \(apiError.message)"
        case .emptyData:
            return "No data received"
        case .invalidData:
            return "Invalid response data"
        case .invalidURL:
            return "Invalid URL"
        case .requestFailed:
            return "Request failed"
        case .responseParsingFailed:
            return "Failed to parse response"
        case .streamResponseError:
            return "Stream error"
        default:
            return "Unknown error"
        }
    }
}

View

import SwiftUI

/// Prompt-entry screen wired to `ImageGeneratorViewModel`.
struct ContentView: View {
    @StateObject private var viewModel = ImageGeneratorViewModel()
    @State private var prompt = ""

    var body: some View {
        NavigationStack {
            VStack(spacing: 20) {
                promptField
                generateButton
                generatedImage
                errorText
                Spacer()
            }
            .padding()
            .navigationTitle("Image Generator")
        }
    }

    // Multiline prompt input.
    private var promptField: some View {
        TextField("Describe the image...", text: $prompt, axis: .vertical)
            .textFieldStyle(.roundedBorder)
            .lineLimit(3...6)
    }

    // Starts an async generation task; disabled while empty or busy.
    private var generateButton: some View {
        Button {
            Task { await viewModel.generateImage(prompt: prompt) }
        } label: {
            Label(
                viewModel.isLoading ? "Generating..." : "Generate",
                systemImage: "wand.and.stars"
            )
        }
        .disabled(prompt.isEmpty || viewModel.isLoading)
        .buttonStyle(.borderedProminent)
    }

    // Shows the generated image once a URL is available.
    @ViewBuilder
    private var generatedImage: some View {
        if let url = viewModel.imageURL {
            AsyncImage(url: url) { image in
                image
                    .resizable()
                    .aspectRatio(contentMode: .fit)
                    .cornerRadius(12)
            } placeholder: {
                ProgressView()
            }
        }
    }

    // Surfaces view-model errors as red caption text.
    @ViewBuilder
    private var errorText: some View {
        if let error = viewModel.errorMessage {
            Text(error)
                .foregroundColor(.red)
                .font(.caption)
        }
    }
}

Error handling

/// Generates an image for `prompt`, returning its URL or nil on any failure.
/// Known API error codes are logged with specific messages.
func generateImageSafely(prompt: String) async -> URL? {
    let query = ImagesQuery(
        prompt: prompt,
        model: .other("gemini-3-pro-image"),
        n: 1,
        size: ._1024
    )

    do {
        let result = try await openAI.images(query: query)
        guard let urlString = result.data.first?.url else { return nil }
        return URL(string: urlString)
    } catch let error as OpenAIError {
        if case .apiError(let apiError) = error {
            // Distinguish the error codes callers most often need to act on.
            switch apiError.code {
            case "AUTHENTICATION_FAILED":
                print("Invalid API key")
            case "RATE_LIMITED":
                print("Rate limit exceeded")
            case "INSUFFICIENT_BALANCE":
                print("Insufficient balance")
            default:
                print("API error: \(apiError.message)")
            }
        } else {
            print("Error: \(error)")
        }
        return nil
    } catch {
        print("Unexpected error: \(error)")
        return nil
    }
}

Next steps