Installation
Add InsForge dependencies to your project
Maven Central
build.gradle.kts:

repositories {
    mavenLocal() // For local development
    mavenCentral()
}

dependencies {
    implementation("dev.insforge:insforge-kotlin:0.1.6")
}
GitHub Packages
First, create a GitHub Personal Access Token:
Go to GitHub → Settings → Developer settings → Personal access tokens → Tokens (classic)
Grant it the read:packages permission
Then configure your project using one of the following methods:
Option A: Environment Variables
settings.gradle.kts (or build.gradle.kts):

repositories {
    mavenCentral()
    maven {
        url = uri("https://maven.pkg.github.com/InsForge/insforge-kotlin")
        credentials {
            username = System.getenv("GITHUB_USER") ?: ""
            password = System.getenv("GITHUB_TOKEN") ?: ""
        }
    }
}
build.gradle.kts:

dependencies {
    implementation("dev.insforge:insforge-kotlin:0.1.6")
}
Set environment variables before building:

export GITHUB_USER="your-github-username"
export GITHUB_TOKEN="your-personal-access-token"
Option B: gradle.properties (Local Development)
Add credentials to your global Gradle properties file.

~/.gradle/gradle.properties:

gpr.user=your-github-username
gpr.token=ghp_xxxxxxxxxxxx
settings.gradle.kts:

repositories {
    mavenCentral()
    maven {
        url = uri("https://maven.pkg.github.com/InsForge/insforge-kotlin")
        credentials {
            username = providers.gradleProperty("gpr.user").orNull ?: ""
            password = providers.gradleProperty("gpr.token").orNull ?: ""
        }
    }
}
build.gradle.kts:

dependencies {
    implementation("dev.insforge:insforge-kotlin:0.1.6")
}
The ~/.gradle/gradle.properties file is stored outside your project, so credentials won’t be accidentally committed to version control.
Initialize InsForge SDK
import dev.insforge.createInsforgeClient
import dev.insforge.auth.Auth
import dev.insforge.database.Database
import dev.insforge.storage.Storage
import dev.insforge.functions.Functions
import dev.insforge.realtime.Realtime
import dev.insforge.ai.AI

val client = createInsforgeClient(
    baseUrl = "https://your-app.insforge.app",
    anonKey = "your-api-key"
) {
    install(Auth)
    install(Database)
    install(Storage)
    install(Functions)
    install(Realtime) {
        autoReconnect = true
        reconnectDelay = 5000
    }
    install(AI)
}
Enable Logging (Optional)
For debugging, you can configure the SDK log level:
import dev.insforge.InsforgeLogLevel

val client = createInsforgeClient(
    baseUrl = "https://your-app.insforge.app",
    anonKey = "your-api-key"
) {
    // DEBUG: logs request method/URL and response status
    // VERBOSE: logs full headers and request/response bodies
    logLevel = InsforgeLogLevel.DEBUG
    install(Auth)
    install(Database)
    // ... other modules
}
Log Level | Description
----------|------------
NONE      | No logging (default, recommended for production)
ERROR     | Only errors
WARN      | Warnings and errors
INFO      | Informational messages
DEBUG     | Debug info (request method, URL, response status)
VERBOSE   | Full details (headers, request/response bodies)
Use NONE or ERROR in production to avoid exposing sensitive data in logs.
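One way to enforce this is to derive the level from your build configuration so verbose logging never ships in a release build. A minimal sketch, assuming a standard Android BuildConfig.DEBUG flag (the rest of the setup is unchanged from above):

// DEBUG in debug builds, NONE in release builds (BuildConfig.DEBUG is an Android convention, not part of the SDK)
val client = createInsforgeClient(
    baseUrl = "https://your-app.insforge.app",
    anonKey = "your-api-key"
) {
    logLevel = if (BuildConfig.DEBUG) InsforgeLogLevel.DEBUG else InsforgeLogLevel.NONE
    install(Auth)
    install(Database)
}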
Android Initialization
Add Chrome Custom Tabs dependency to your build.gradle.kts:
dependencies {
    implementation("androidx.browser:browser:1.9.0")
}
Initialize InsForge SDK (With Chrome Custom Tabs and Session Storage)
import android.app.Activity
import android.content.Context
import android.content.Intent
import android.net.Uri
import androidx.browser.customtabs.CustomTabsIntent
import dev.insforge.createInsforgeClient
import dev.insforge.ai.AI
import dev.insforge.auth.Auth
import dev.insforge.auth.BrowserLauncher
import dev.insforge.auth.ClientType
import dev.insforge.auth.SessionStorage
import dev.insforge.database.Database
import dev.insforge.functions.Functions
import dev.insforge.realtime.Realtime
import dev.insforge.storage.Storage

class InsforgeManager(private val context: Context) {
    val client = createInsforgeClient(
        baseUrl = "https://your-app.insforge.app",
        anonKey = "your-anon-key"
    ) {
        install(Auth) {
            // 1. Chrome Custom Tabs - opens in-app, similar to iOS ASWebAuthenticationSession
            browserLauncher = BrowserLauncher { url ->
                val customTabsIntent = CustomTabsIntent.Builder()
                    .setShowTitle(true)
                    .build()
                // Handle non-Activity context safely
                if (context is Activity) {
                    customTabsIntent.launchUrl(context, Uri.parse(url))
                } else {
                    customTabsIntent.intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
                    customTabsIntent.launchUrl(context, Uri.parse(url))
                }
            }
            // 2. Enable session persistence
            persistSession = true
            // 3. Configure SessionStorage (backed by SharedPreferences)
            sessionStorage = object : SessionStorage {
                private val prefs = context.getSharedPreferences(
                    "insforge_auth",
                    Context.MODE_PRIVATE
                )

                override suspend fun save(key: String, value: String) {
                    prefs.edit().putString(key, value).apply()
                }

                override suspend fun get(key: String): String? {
                    return prefs.getString(key, null)
                }

                override suspend fun remove(key: String) {
                    prefs.edit().remove(key).apply()
                }
            }
            // 4. Set client type for mobile
            clientType = ClientType.MOBILE
        }
        // Install Database module
        install(Database)
        // Install Realtime module for real-time subscriptions
        install(Realtime) {
            debug = true
        }
        // Install other modules
        install(Storage)
        install(Functions)
        install(AI)
    }
}
Use Jetpack DataStore for Session Storage (Optional)
import android.content.Context
import androidx.datastore.core.DataStore
import androidx.datastore.preferences.core.Preferences
import androidx.datastore.preferences.core.edit
import androidx.datastore.preferences.core.stringPreferencesKey
import androidx.datastore.preferences.preferencesDataStore
import dev.insforge.auth.SessionStorage
import kotlinx.coroutines.flow.first
import kotlinx.coroutines.flow.map

val Context.authDataStore: DataStore<Preferences> by preferencesDataStore(name = "insforge_auth")

class DataStoreSessionStorage(private val context: Context) : SessionStorage {
    override suspend fun save(key: String, value: String) {
        context.authDataStore.edit { prefs ->
            prefs[stringPreferencesKey(key)] = value
        }
    }

    override suspend fun get(key: String): String? {
        return context.authDataStore.data.map { prefs ->
            prefs[stringPreferencesKey(key)]
        }.first()
    }

    override suspend fun remove(key: String) {
        context.authDataStore.edit { prefs ->
            prefs.remove(stringPreferencesKey(key))
        }
    }
}

// Then use it in your InsForge client
install(Auth) {
    browserLauncher = ...
    persistSession = true
    sessionStorage = DataStoreSessionStorage(context)
}
listModels()
List all available AI models.
Example
val models = client.ai.listModels()
models.forEach { model ->
    println("${model.provider}/${model.modelId}")
    println("  Input: ${model.inputModality}, Output: ${model.outputModality}")
    println("  Max tokens: ${model.maxTokens}")
}
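A common follow-up is checking that a particular model is available before calling it. A small sketch, assuming modelId is a plain String as the printed output above suggests:

// Check availability of a specific model before use
val available = client.ai.listModels()
    .any { it.modelId.contains("claude-3.5-haiku") }
if (!available) {
    println("claude-3.5-haiku is not available on this project")
}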
chatCompletion()
Create an AI chat completion.
Parameters
model (String) - Model identifier (e.g., “anthropic/claude-3.5-haiku”, “openai/gpt-4o”)
messages (List<ChatMessage>) - Conversation messages
temperature (Double?, optional) - Sampling temperature (0.0-2.0)
maxTokens (Int?, optional) - Maximum tokens to generate
systemPrompt (String?, optional) - System prompt
webSearch (WebSearchPlugin?, optional) - Enable web search capabilities
fileParser (FileParserPlugin?, optional) - Enable file/PDF parsing
Returns
Example (Basic)
val response = client.ai.chatCompletion(
    model = "anthropic/claude-3.5-haiku",
    messages = listOf(
        ChatMessage.user("What is the capital of France?")
    )
)

println(response.text) // Direct access to text content
println("Tokens used: ${response.metadata.usage.totalTokens}")

// Access annotations if available
response.annotations?.forEach { annotation ->
    println("Citation: ${annotation.urlCitation.url}")
}
Multimodal Messages
The SDK supports multimodal messages with images, PDFs, and audio.
// Simple text message (backward compatible)
val response = client.ai.chatCompletion(
    model = "openai/gpt-4",
    messages = listOf(ChatMessage.user("Hello!"))
)

// Custom multimodal message with multiple content parts
val customMessage = ChatMessage.multimodal(
    "user",
    TextContent(text = "Describe these images"),
    ImageContent(imageUrl = ImageUrlConfig(url = "https://example.com/1.jpg")),
    ImageContent(imageUrl = ImageUrlConfig(url = "https://example.com/2.jpg"))
)

val multimodalResponse = client.ai.chatCompletion(
    model = "anthropic/claude-sonnet-4.5",
    messages = listOf(customMessage)
)
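Content parts can also be mixed in a single message. The sketch below reuses only part types that appear in this section (TextContent, ImageContent, and the FileContent type from the PDF example further down); for the file part to be parsed, pass a fileParser plugin as in that example. The URLs are placeholders:

// One message combining text, an image, and a PDF
val mixedMessage = ChatMessage.multimodal(
    "user",
    TextContent(text = "Compare the chart in this image with the figures in the PDF"),
    ImageContent(imageUrl = ImageUrlConfig(url = "https://example.com/chart.png")),
    FileContent(
        file = FileConfig(
            filename = "report.pdf",
            fileData = "https://example.com/report.pdf"
        )
    )
)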
Example (With Web Search)
val response = client.ai.chatCompletion(
    model = "openai/gpt-4",
    messages = listOf(
        ChatMessage.user("What are the latest news about AI?")
    ),
    webSearch = WebSearchPlugin(
        enabled = true,
        maxResults = 5,
        engine = WebSearchEngine.NATIVE
    )
)

println(response.text)

// Access URL citations from search results
response.annotations?.forEach { annotation ->
    println("Source: ${annotation.urlCitation.title} - ${annotation.urlCitation.url}")
}
Example (With PDF Parsing)
// Using the convenience method
val response = client.ai.chatCompletionWithFile(
    model = "anthropic/claude-sonnet-4.5",
    text = "Summarize the content of this PDF",
    filename = "document.pdf",
    fileData = "https://example.com/document.pdf",
    pdfEngine = PdfEngine.MISTRAL_OCR
)

println(response.text)

// Or using a custom multimodal message
val pdfMessage = ChatMessage.multimodal(
    "user",
    TextContent(text = "Summarize this document"),
    FileContent(
        file = FileConfig(
            filename = "document.pdf",
            fileData = "https://example.com/document.pdf"
        )
    )
)

val response2 = client.ai.chatCompletion(
    model = "anthropic/claude-sonnet-4.5",
    messages = listOf(pdfMessage),
    fileParser = FileParserPlugin(
        enabled = true,
        pdf = PdfParserConfig(engine = PdfEngine.MISTRAL_OCR)
    )
)

println(response2.text)
chatCompletionWithImages()
Convenience method for chat completion with image analysis.
Parameters
model (String) - Model identifier (must support vision, e.g., “openai/gpt-4o”)
text (String) - Text prompt to accompany the images
imageUrls (List<String>) - List of image URLs to analyze
temperature (Double?, optional) - Sampling temperature
maxTokens (Int?, optional) - Maximum tokens to generate
Example
val response = client.ai.chatCompletionWithImages(
    model = "openai/gpt-4o",
    text = "What is in this image?",
    imageUrls = listOf("https://example.com/image.jpg")
)

println(response.text)
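Because imageUrls is a list, several images can be analyzed in one call, and the optional parameters documented above apply as usual (the URLs are placeholders):

// Multiple images in one request, with an explicit token cap
val comparison = client.ai.chatCompletionWithImages(
    model = "openai/gpt-4o",
    text = "What changed between these two photos?",
    imageUrls = listOf(
        "https://example.com/before.jpg",
        "https://example.com/after.jpg"
    ),
    maxTokens = 500
)
println(comparison.text)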
chatCompletionWithFile()
Convenience method for chat completion with file (PDF) analysis.
Parameters
model (String) - Model identifier
text (String) - Text prompt to accompany the file
filename (String) - Name of the file (e.g., “report.pdf”)
fileData (String) - File URL or base64-encoded data URL
pdfEngine (PdfEngine?, optional) - PDF processing engine
temperature (Double?, optional) - Sampling temperature
maxTokens (Int?, optional) - Maximum tokens to generate
Example (Base64 Encoded)
// Load PDF from local file
val pdfBytes = File("document.pdf").readBytes()
val base64Data = "data:application/pdf;base64,${Base64.encodeToString(pdfBytes, Base64.NO_WRAP)}"

val response = client.ai.chatCompletionWithFile(
    model = "anthropic/claude-sonnet-4.5",
    text = "What are the key points in this document?",
    filename = "document.pdf",
    fileData = base64Data
)

println(response.text)
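The snippet above uses Android's android.util.Base64. On a plain JVM target, the same data URL can be built with java.util.Base64; a sketch of just the encoding step:

import java.io.File

// JVM variant of the encoding step above
val bytes = File("document.pdf").readBytes()
val dataUrl = "data:application/pdf;base64,${java.util.Base64.getEncoder().encodeToString(bytes)}"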
chatCompletionWithWebSearch()
Convenience method for chat completion with web search enabled.
Parameters
model (String) - Model identifier
messages (List<ChatMessage>) - Conversation messages
maxResults (Int?, optional) - Maximum search results (default: 5)
engine (WebSearchEngine?, optional) - Search engine to use
temperature (Double?, optional) - Sampling temperature
maxTokens (Int?, optional) - Maximum tokens to generate
Example
val response = client.ai.chatCompletionWithWebSearch(
    model = "openai/gpt-4o",
    messages = listOf(
        ChatMessage(role = "user", content = MessageContent.Text("What are today's top news headlines?"))
    ),
    maxResults = 5
)

println(response.text)

// Access citations
response.annotations?.forEach { annotation ->
    println("- ${annotation.urlCitation.title}: ${annotation.urlCitation.url}")
}
chatCompletionStream()
Create a streaming chat completion. Returns Flow<String> that emits content chunks directly.
Parameters
model (String) - Model identifier
messages (List<ChatMessage>) - Conversation messages
temperature (Double?, optional) - Sampling temperature
maxTokens (Int?, optional) - Maximum tokens to generate
webSearch (WebSearchPlugin?, optional) - Enable web search capabilities
fileParser (FileParserPlugin?, optional) - Enable file/PDF parsing
thinking (Boolean?, optional) - Enable extended reasoning mode
Example
client.ai.chatCompletionStream(
    model = "anthropic/claude-3.5-haiku",
    messages = listOf(
        ChatMessage(role = "user", content = "Tell me a story")
    )
).collect { content ->
    print(content) // Content string directly
}
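Since the return value is a Flow<String>, chunks can also be accumulated as they arrive. The sketch below additionally sets the optional thinking flag documented above; like any collect call, it must run inside a coroutine:

// Accumulate the streamed chunks into a full response
val builder = StringBuilder()
client.ai.chatCompletionStream(
    model = "anthropic/claude-3.5-haiku",
    messages = listOf(ChatMessage(role = "user", content = "Explain recursion briefly")),
    thinking = true
).collect { chunk ->
    builder.append(chunk)
}
println(builder.toString())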
generateEmbeddings()
Generate vector embeddings for text input using AI models.
Parameters
model (String) - Embedding model identifier (e.g., “google/gemini-embedding-001”)
input (String?, optional) - Single text input to embed
inputs (List<String>?, optional) - Multiple text inputs to embed
encodingFormat (EmbeddingEncodingFormat?, optional) - Output format (FLOAT or BASE64)
dimensions (Int?, optional) - Number of dimensions for the output embeddings
Returns
Example (With Optional Parameters)
val response = client.ai.generateEmbeddings(
    model = "google/gemini-embedding-001",
    input = "Hello world",
    encodingFormat = EmbeddingEncodingFormat.FLOAT,
    dimensions = 512
)

println("Embedding dimensions: ${response.data.first().embedding.size}") // 512
generateImage()
Generate images using AI models.
Parameters
model (String) - Image generation model (e.g., “openai/dall-e-3”)
prompt (String) - Image description
Returns
Example
val response = client.ai.generateImage(
    model = "google/gemini-2.5-flash-image-preview",
    prompt = "A serene mountain landscape at sunset"
)

println("Generated ${response.count} image(s)")

response.images.forEach { image ->
    val imageUrl = image.imageUrl.url
    if (imageUrl.startsWith("data:image")) {
        // Handle base64-encoded image
        val base64Data = imageUrl.substringAfter("base64,")
        val imageData = Base64.decode(base64Data, Base64.DEFAULT)
        val bitmap = BitmapFactory.decodeByteArray(imageData, 0, imageData.size)
        imageView.setImageBitmap(bitmap)
    } else {
        // Handle URL - load with Coil/Glide
        // AsyncImage(model = imageUrl, ...)
    }
}
Error Handling
import dev.insforge.exceptions.InsforgeHttpException
import dev.insforge.exceptions.InsforgeException

try {
    val response = client.ai.chatCompletion(
        model = "anthropic/claude-3.5-haiku",
        messages = listOf(ChatMessage(role = "user", content = "Hello"))
    )
    println(response.text)
} catch (e: InsforgeHttpException) {
    when (e.error) {
        "MODEL_NOT_FOUND" -> println("Model not available")
        "RATE_LIMIT_EXCEEDED" -> println("Rate limit exceeded, try again later")
        "INVALID_REQUEST" -> println("Invalid request: ${e.message}")
        else -> println("API Error: ${e.message}")
    }
} catch (e: InsforgeException) {
    println("SDK Error: ${e.message}")
}
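For transient failures such as the rate-limit case above, a small retry wrapper is one option. A hypothetical sketch (withRateLimitRetry is not part of the SDK), using kotlinx.coroutines delay with a simple linear backoff:

import kotlinx.coroutines.delay

// Hypothetical helper: retries only on RATE_LIMIT_EXCEEDED, rethrows everything else
suspend fun <T> withRateLimitRetry(attempts: Int = 3, block: suspend () -> T): T {
    repeat(attempts - 1) { attempt ->
        try {
            return block()
        } catch (e: InsforgeHttpException) {
            if (e.error != "RATE_LIMIT_EXCEEDED") throw e
            delay(2000L * (attempt + 1)) // wait longer after each failure
        }
    }
    return block() // last attempt propagates any failure
}

// Usage
val reply = withRateLimitRetry {
    client.ai.chatCompletion(
        model = "anthropic/claude-3.5-haiku",
        messages = listOf(ChatMessage(role = "user", content = "Hello"))
    )
}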