## Installation

Add InsForge to your Swift Package Manager dependencies:

```swift
dependencies: [
    .package(url: "https://github.com/insforge/insforge-swift.git", from: "0.0.9")
]
```

Then import the SDK and create a client:

```swift
import InsForge

let insforge = InsForgeClient(
    baseURL: URL(string: "https://your-app.insforge.app")!,
    anonKey: "your-anon-key"
)
```
### Enable Logging (Optional)

For debugging, you can configure the SDK log level and destination:

```swift
let options = InsForgeClientOptions(
    global: .init(
        logLevel: .debug,
        logDestination: .osLog,
        logSubsystem: "com.example.MyApp"
    )
)

let insforge = InsForgeClient(
    baseURL: URL(string: "https://your-app.insforge.app")!,
    anonKey: "your-anon-key",
    options: options
)
```
**Log Levels:**

| Level | Description |
|---|---|
| `.trace` | Most verbose, includes all internal details |
| `.debug` | Detailed information for debugging |
| `.info` | General operational information (default) |
| `.warning` | Warnings that don't prevent operation |
| `.error` | Errors that affect functionality |
| `.critical` | Critical failures |
**Log Destinations:**

| Destination | Description |
|---|---|
| `.console` | Standard output (`print`) |
| `.osLog` | Apple's unified logging system (recommended for iOS/macOS) |
| `.none` | Disable logging |
| `.custom` | Provide your own `LogHandler` factory |

Use `.info` or `.error` in production to avoid exposing sensitive data in logs.
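If the built-in destinations don't fit, `.custom` lets you supply your own `LogHandler` factory. A minimal sketch, assuming the SDK builds on `swift-log` and that `.custom` wraps a `(String) -> LogHandler` factory closure (both details are assumptions; check the SDK's `LogDestination` definition):

```swift
import Logging
import InsForge

let options = InsForgeClientOptions(
    global: .init(
        logLevel: .warning,
        // Hypothetical factory shape: label in, LogHandler out
        logDestination: .custom { label in
            var handler = StreamLogHandler.standardError(label: label)
            handler.logLevel = .warning  // mirror the SDK log level
            return handler
        }
    )
)
```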
## chatCompletion()

Generate AI chat completions.

### Parameters

- `model` (String) - AI model identifier (e.g., `"anthropic/claude-3.5-haiku"`, `"openai/gpt-4o"`)
- `messages` ([ChatMessage]) - Array of chat messages
- `stream` (Bool, optional) - Enable streaming mode (default: `false`)
- `temperature` (Double, optional) - Sampling temperature, 0-2
- `maxTokens` (Int, optional) - Maximum number of tokens to generate
- `topP` (Double, optional) - Top-p (nucleus) sampling parameter
- `systemPrompt` (String, optional) - System prompt for the conversation
- `webSearch` (WebSearchPlugin?, optional) - Enable web search capabilities
- `fileParser` (FileParserPlugin?, optional) - Enable file/PDF parsing

### Returns

`ChatCompletionResponse` (see Models Reference).

### Example

```swift
let response = try await insforge.ai.chatCompletion(
    model: "anthropic/claude-3.5-haiku",
    messages: [
        ChatMessage(role: .user, content: "What is the capital of France?")
    ]
)

print(response.text)
// "The capital of France is Paris."

// Access metadata
if let metadata = response.metadata {
    print("Model: \(metadata.model)")
    if let usage = metadata.usage {
        print("Tokens used: \(usage.totalTokens)")
    }
}

// Access annotations if available (from web search)
if let annotations = response.annotations {
    for annotation in annotations {
        print("Citation: \(annotation.urlCitation.url)")
        if let title = annotation.urlCitation.title {
            print("Title: \(title)")
        }
    }
}
```
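The optional parameters compose with multi-turn history. A short sketch (model name and values are illustrative) combining prior assistant turns with `systemPrompt`, `temperature`, and `maxTokens`:

```swift
let response = try await insforge.ai.chatCompletion(
    model: "anthropic/claude-3.5-haiku",
    messages: [
        ChatMessage(role: .user, content: "Suggest a name for a hiking app."),
        ChatMessage(role: .assistant, content: "How about \"TrailMix\"?"),
        ChatMessage(role: .user, content: "Something shorter?")
    ],
    temperature: 0.7,  // moderate creativity
    maxTokens: 100,    // keep the reply short
    systemPrompt: "You are a concise branding assistant."
)
print(response.text)
```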
### Multimodal Messages

The SDK supports multimodal messages with images, PDFs, and audio. You can use either simple text content (backward compatible) or an array of content parts.

```swift
// Simple text message (backward compatible)
let textMessage = ChatMessage(role: .user, content: "Hello!")

// Message with image
let imageMessage = ChatMessage(role: .user, content: [
    .text("What is in this image?"),
    .image(url: "https://example.com/image.jpg", detail: .high)
])

// Message with PDF file
let fileMessage = ChatMessage(role: .user, content: [
    .text("Summarize this document"),
    .file(filename: "doc.pdf", fileData: "https://example.com/doc.pdf")
])

// Message with audio
let audioMessage = ChatMessage(role: .user, content: [
    .text("Transcribe this audio"),
    .audio(data: "base64encodeddata", format: .mp3)
])
```
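Multimodal messages are passed to `chatCompletion()` like any other message. Pick a model whose input modalities cover the parts you send (the model below is illustrative; see `listModels()`):

```swift
let response = try await insforge.ai.chatCompletion(
    model: "openai/gpt-4o",   // must accept image input
    messages: [imageMessage]  // from the snippet above
)
print(response.text)
```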
### Example with Web Search

```swift
let response = try await insforge.ai.chatCompletion(
    model: "openai/gpt-4o",
    messages: [
        ChatMessage(role: .user, content: "What's the latest news about AI?")
    ],
    webSearch: WebSearchPlugin(
        enabled: true,
        engine: .native,
        maxResults: 5
    )
)

print(response.text)

// Access URL citations from search results
if let annotations = response.annotations {
    for annotation in annotations {
        print("Source: \(annotation.urlCitation.title ?? "Unknown") - \(annotation.urlCitation.url)")
    }
}
```
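`WebSearchPlugin` also accepts an alternative engine and a custom `searchPrompt` (see Plugin Configuration below). For example:

```swift
let response = try await insforge.ai.chatCompletion(
    model: "openai/gpt-4o",
    messages: [
        ChatMessage(role: .user, content: "Summarize this week's AI research news.")
    ],
    webSearch: WebSearchPlugin(
        engine: .exa,  // use the Exa search engine
        maxResults: 3,
        searchPrompt: "Prefer primary sources and include publication dates."
    )
)
```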
### Example with PDF Parsing

```swift
// Include PDF file in the message content
let response = try await insforge.ai.chatCompletion(
    model: "anthropic/claude-sonnet-4.5",
    messages: [
        ChatMessage(role: .user, content: [
            .text("Summarize this document"),
            .file(filename: "report.pdf", fileData: "https://example.com/report.pdf")
        ])
    ],
    fileParser: FileParserPlugin(
        enabled: true,
        pdf: PdfParserConfig(engine: .mistralOcr)
    )
)

print(response.text)
```
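For digitally generated PDFs that already contain a text layer, the `.pdfText` engine may be sufficient and avoids OCR; `.mistralOcr` is the better fit for scanned documents:

```swift
// Text-layer extraction, no OCR
let parser = FileParserPlugin(pdf: PdfParserConfig(engine: .pdfText))
```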
## generateEmbeddings()

Generate vector embeddings for text input using AI models.

### Parameters

- `model` (String) - Embedding model identifier (e.g., `"google/gemini-embedding-001"`)
- `input` (EmbeddingInput) - Text input to embed (single or multiple)
- `encodingFormat` (EmbeddingEncodingFormat?, optional) - Output format (`.float` or `.base64`)
- `dimensions` (Int?, optional) - Number of dimensions for the output embeddings

### Returns

`EmbeddingsResponse` (see Models Reference).

### Example (With Optional Parameters)

```swift
let response = try await insforge.ai.generateEmbeddings(
    model: "google/gemini-embedding-001",
    input: .single("Hello world"),
    encodingFormat: .float,
    dimensions: 768
)

print("Embedding dimensions: \(response.data.first?.embedding.count ?? 0)") // 768
```
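Embeddings are typically compared with cosine similarity. A sketch that embeds two texts in one call via `.multiple` and compares them in plain Swift:

```swift
import Foundation

let response = try await insforge.ai.generateEmbeddings(
    model: "google/gemini-embedding-001",
    input: .multiple([
        "How do I reset my password?",
        "I forgot my login credentials."
    ])
)

// Cosine similarity: dot product over the product of magnitudes
func cosineSimilarity(_ a: [Double], _ b: [Double]) -> Double {
    let dot = zip(a, b).reduce(0) { $0 + $1.0 * $1.1 }
    let magA = (a.reduce(0) { $0 + $1 * $1 }).squareRoot()
    let magB = (b.reduce(0) { $0 + $1 * $1 }).squareRoot()
    return dot / (magA * magB)
}

if response.data.count == 2 {
    let score = cosineSimilarity(response.data[0].embedding,
                                 response.data[1].embedding)
    print("Similarity: \(score)")  // closer to 1.0 means more similar
}
```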
## generateImage()

Generate images using AI models.

### Parameters

- `model` (String) - Image generation model identifier
- `prompt` (String) - Text description of the image to generate

### Returns

`ImageGenerationResponse` (see Models Reference).

### Example

```swift
let response = try await insforge.ai.generateImage(
    model: "google/gemini-2.5-flash-image-preview",
    prompt: "A serene mountain landscape at sunset"
)

// Access generated images
for image in response.images {
    print("Image URL: \(image.imageUrl)")
    print("Type: \(image.type)")
}

// Get image count
print("Generated \(response.imageCount) images")

// Access metadata
if let metadata = response.metadata {
    print("Model: \(metadata.model)")
    if let revisedPrompt = metadata.revisedPrompt {
        print("Revised prompt: \(revisedPrompt)")
    }
}
```
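When `imageUrl` is an HTTPS URL (depending on the model it may instead be a data URI; the response format is not specified here), the bytes can be fetched with `URLSession`:

```swift
import Foundation

if let first = response.images.first,
   let url = URL(string: first.url),
   url.scheme == "https" {
    let (data, _) = try await URLSession.shared.data(from: url)
    // Write to a temporary file; adjust the destination for your app
    let destination = FileManager.default.temporaryDirectory
        .appendingPathComponent("generated.png")
    try data.write(to: destination)
    print("Saved \(data.count) bytes to \(destination.path)")
}
```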
## listModels()

List available AI models.

### Returns

`ListModelsResponse` (see Models Reference).

### Example

```swift
let models = try await insforge.ai.listModels()

// List text generation models
print("Text Models:")
for provider in models.text {
    print("  Provider: \(provider.provider) (configured: \(provider.configured))")
    for model in provider.models {
        print("  - \(model.id)")
        print("    Input: \(model.inputModality)")
        print("    Output: \(model.outputModality)")
        print("    Price level: \(model.priceLevel)")
    }
}

// List image generation models
print("\nImage Models:")
for provider in models.image {
    print("  Provider: \(provider.provider)")
    for model in provider.models {
        print("  - \(model.id)")
    }
}
```
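The modality arrays make capability filtering straightforward, e.g. listing models from configured providers that accept image input:

```swift
let visionModels = models.text
    .filter { $0.configured }                       // providers with credentials set up
    .flatMap { $0.models }
    .filter { $0.inputModality.contains("image") }  // models that accept images

for model in visionModels {
    print("\(model.id) (price level \(model.priceLevel))")
}
```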
## Models Reference

### Enums

```swift
// Search engine options for web search
public enum WebSearchEngine: String, Codable {
    case native
    case exa
}

// PDF processing engine options
public enum PdfEngine: String, Codable {
    case pdfText = "pdf-text"
    case mistralOcr = "mistral-ocr"
    case native
}
```

### Plugin Configuration

```swift
// Web search plugin configuration
public struct WebSearchPlugin: Codable, Sendable {
    let enabled: Bool
    let engine: WebSearchEngine?
    let maxResults: Int?       // 1-10, default: 5
    let searchPrompt: String?  // Custom prompt for search results

    init(
        enabled: Bool = true,
        engine: WebSearchEngine? = nil,
        maxResults: Int? = nil,
        searchPrompt: String? = nil
    )
}

// PDF parser configuration
public struct PdfParserConfig: Codable, Sendable {
    let engine: PdfEngine?

    init(engine: PdfEngine? = nil)
}

// File parser plugin configuration
public struct FileParserPlugin: Codable, Sendable {
    let enabled: Bool
    let pdf: PdfParserConfig?

    init(enabled: Bool = true, pdf: PdfParserConfig? = nil)
}
```
### ChatMessage

```swift
public struct ChatMessage: Codable, Sendable {
    let role: Role
    let content: MessageContent

    enum Role: String, Codable {
        case user
        case assistant
        case system
    }

    // Initialize with simple text (backward compatible)
    init(role: Role, content: String)

    // Initialize with multimodal content
    init(role: Role, content: [ContentPart])
}

// Content can be simple text or multimodal parts
public enum MessageContent: Codable, Sendable {
    case text(String)
    case parts([ContentPart])
}

// Multimodal content parts
public enum ContentPart: Codable, Sendable {
    case text(String)
    case image(url: String, detail: ImageDetail? = nil)
    case file(filename: String, fileData: String)
    case audio(data: String, format: AudioFormat)
}

// Image detail level
public enum ImageDetail: String, Codable, Sendable {
    case auto
    case low
    case high
}

// Supported audio formats
public enum AudioFormat: String, Codable, Sendable {
    case wav
    case mp3
    case aiff
    case aac
    case ogg
    case flac
    case m4a
}
```
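The `data` parameter of `.audio` expects a base64 string. As a sketch, encoding a local MP3 with Foundation before attaching it (the file path is hypothetical):

```swift
import Foundation

let audioURL = URL(fileURLWithPath: "/path/to/recording.mp3")  // hypothetical path
let audioData = try Data(contentsOf: audioURL)

let message = ChatMessage(role: .user, content: [
    .text("Transcribe this audio"),
    .audio(data: audioData.base64EncodedString(), format: .mp3)
])
```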
### ChatCompletionResponse

```swift
public struct ChatCompletionResponse: Codable, Sendable {
    let text: String                           // The generated response text
    let annotations: [UrlCitationAnnotation]?  // URL citations from web search
    let metadata: Metadata?

    // Computed properties
    var content: String { text }  // Alias for text
    var success: Bool             // True if text is not empty

    struct Metadata: Codable {
        let model: String
        let usage: TokenUsage?
    }
}
```

### Annotations

```swift
// URL citation information from web search results
public struct UrlCitation: Codable, Sendable {
    let url: String
    let title: String?
    let content: String?
    let startIndex: Int?
    let endIndex: Int?
}

// Annotation containing a URL citation
public struct UrlCitationAnnotation: Codable, Sendable {
    let type: String  // "url_citation"
    let urlCitation: UrlCitation
}
```

### TokenUsage

```swift
public struct TokenUsage: Codable, Sendable {
    let promptTokens: Int
    let completionTokens: Int
    let totalTokens: Int
}
```
### EmbeddingsResponse

```swift
// Encoding format options for embeddings
public enum EmbeddingEncodingFormat: String, Codable, Sendable {
    case float
    case base64
}

// Input type for embeddings (single or multiple texts)
public enum EmbeddingInput: Sendable {
    case single(String)
    case multiple([String])
}

public struct EmbeddingsResponse: Codable, Sendable {
    let object: String  // "list"
    let data: [EmbeddingObject]
    let metadata: Metadata?

    struct Metadata: Codable {
        let model: String
        let usage: EmbeddingsUsage?
    }
}

public struct EmbeddingObject: Codable, Sendable {
    let object: String       // "embedding"
    let embedding: [Double]  // or String for base64 format
    let index: Int
}

public struct EmbeddingsUsage: Codable, Sendable {
    let promptTokens: Int?
    let totalTokens: Int?
}
```
### ImageGenerationResponse

```swift
public struct ImageGenerationResponse: Codable, Sendable {
    let model: String?
    let images: [ImageMessage]
    let text: String?
    let count: Int?
    let metadata: Metadata?

    // Computed property
    var imageCount: Int  // count ?? images.count

    struct Metadata: Codable {
        let model: String
        let revisedPrompt: String?
        let usage: TokenUsage?
    }
}
```

### ImageMessage

```swift
public struct ImageMessage: Codable, Sendable {
    let type: String
    let imageUrl: String

    // Computed property
    var url: String { imageUrl }  // Alias for imageUrl
}
```

### AIModel

```swift
public struct AIModel: Codable, Sendable {
    let id: String
    let modelId: String
    let provider: String
    let inputModality: [String]   // e.g., ["text"], ["text", "image"]
    let outputModality: [String]  // e.g., ["text"], ["image"]
    let priceLevel: Int

    // Computed property
    var name: String { id }
}
```

### ListModelsResponse

```swift
public struct ListModelsResponse: Codable, Sendable {
    let text: [ModelProvider]   // Text generation models
    let image: [ModelProvider]  // Image generation models

    struct ModelProvider: Codable {
        let provider: String
        let configured: Bool
        let models: [AIModel]
    }
}
```