Local LLM Text Generation Example with LlmWrapper (Kotlin)

Kotlin
import com.nexa.sdk.LlmWrapper
import com.nexa.sdk.bean.GenerationConfig
import kotlinx.coroutines.Dispatchers

val llm = LlmWrapper.builder()
    .modelPath("/path/to/your/model.gguf")
    .dispatcher(Dispatchers.IO)
    .build()
    .getOrNull()

val generateResult = llm?.generate(
    prompt = "prompt",
    config = GenerationConfig()
)

generateResult?.onSuccess { response ->
    // Handle the completed generation, e.g. render `response` in the UI.
}?.onFailure { exception ->
    // Handle the failure, e.g. log `exception`.
}
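
Because the wrapper is configured with a coroutine dispatcher, generate is presumably a suspend function and must be called from a coroutine. The sketch below is a minimal end-to-end example under that assumption; runLocalGeneration and scope are hypothetical stand-ins, and in an Android app you would typically use lifecycleScope or viewModelScope instead.

Kotlin
import com.nexa.sdk.LlmWrapper
import com.nexa.sdk.bean.GenerationConfig
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.launch

// `runLocalGeneration` is a hypothetical helper; `scope` stands in for
// lifecycleScope/viewModelScope in a real Android app.
fun runLocalGeneration(scope: CoroutineScope) {
    scope.launch {
        // getOrNull() yields null when the model fails to load.
        val llm = LlmWrapper.builder()
            .modelPath("/path/to/your/model.gguf")
            .dispatcher(Dispatchers.IO)
            .build()
            .getOrNull() ?: return@launch

        llm.generate(prompt = "prompt", config = GenerationConfig())
            .onSuccess { response -> println("Response: $response") }
            .onFailure { exception -> println("Generation failed: ${exception.message}") }
    }
}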

Local LLM Streaming Text Generation Example with LlmWrapper (Kotlin)

Kotlin
import com.nexa.sdk.LlmWrapper
import com.nexa.sdk.bean.GenerationConfig
import kotlinx.coroutines.Dispatchers

val llm = LlmWrapper.builder()
    .modelPath("/path/to/your/model.gguf")
    .dispatcher(Dispatchers.IO)
    .build()
    .getOrNull()

val generateFlow = llm?.generateStreamFlow(
    prompt = "prompt",
    config = GenerationConfig()
)

generateFlow?.collect { result ->
    result.onSuccess { chunk ->
        // Handle each streamed chunk, e.g. append it to the displayed text.
    }.onFailure { exception ->
        // Handle the streaming failure, e.g. log `exception`.
    }
}
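
A sketch of accumulating the stream into a complete response, assuming each emitted chunk is a text fragment (the exact chunk type is defined by the SDK; check the signature of generateStreamFlow). Collection is a suspending operation, so it runs inside a coroutine; collectGeneration is a hypothetical helper.

Kotlin
import com.nexa.sdk.LlmWrapper
import com.nexa.sdk.bean.GenerationConfig
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.launch

// `collectGeneration` is a hypothetical helper around the flow shown above.
fun collectGeneration(scope: CoroutineScope, llm: LlmWrapper) {
    scope.launch {
        val output = StringBuilder()
        llm.generateStreamFlow(prompt = "prompt", config = GenerationConfig())
            .collect { result ->
                result.onSuccess { chunk ->
                    output.append(chunk)            // accumulate each partial fragment
                }.onFailure { exception ->
                    println("Streaming failed: ${exception.message}")
                }
            }
        // Once collection completes, `output` holds the full generated text.
        println(output.toString())
    }
}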

Local VLM Text Generation Example with VlmWrapper (Kotlin)

Kotlin
import com.nexa.sdk.VlmWrapper
import com.nexa.sdk.bean.GenerationConfig
import kotlinx.coroutines.Dispatchers

val vlm = VlmWrapper.builder()
    .modelPath("/path/to/your/model.gguf")
    .mmprojPath("/path/to/your/mmprojPath.gguf")
    .ctxLen(2048)
    .dispatcher(Dispatchers.IO)
    .build()
    .getOrNull()

val generateResult = vlm?.generate(
    prompt = "prompt",
    config = GenerationConfig()
)

generateResult?.onSuccess { response ->
    // Handle the completed generation, e.g. render `response` in the UI.
}?.onFailure { exception ->
    // Handle the failure, e.g. log `exception`.
}
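
Compared with the LLM builder, the VLM builder takes two extra parameters: mmprojPath, which typically points to the multimodal projector file that maps image features into the language model's embedding space, and ctxLen, which sets the context window in tokens and must be large enough to hold the projected image tokens plus the prompt and the generated output.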

Local VLM Streaming Text Generation Example with VlmWrapper (Kotlin)

Kotlin
import com.nexa.sdk.VlmWrapper
import com.nexa.sdk.bean.GenerationConfig
import kotlinx.coroutines.Dispatchers

val vlm = VlmWrapper.builder()
    .modelPath("/path/to/your/model.gguf")
    .mmprojPath("/path/to/your/mmprojPath.gguf")
    .ctxLen(2048)
    .dispatcher(Dispatchers.IO)
    .build()
    .getOrNull()

val generateFlow = vlm?.generateStreamFlow(
    prompt = "prompt",
    config = GenerationConfig()
)

generateFlow?.collect { result ->
    result.onSuccess { chunk ->
        // Handle each streamed chunk, e.g. append it to the displayed text.
    }.onFailure { exception ->
        // Handle the streaming failure, e.g. log `exception`.
    }
}
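
As an alternative to calling collect directly, the returned flow can be consumed with the standard onEach/launchIn idiom from kotlinx.coroutines, which ties collection to a scope: cancelling the scope cancels the stream. A minimal sketch, assuming vlm was built as above; observeVlmStream is a hypothetical helper.

Kotlin
import com.nexa.sdk.VlmWrapper
import com.nexa.sdk.bean.GenerationConfig
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.flow.launchIn
import kotlinx.coroutines.flow.onEach

// `observeVlmStream` is a hypothetical helper; cancelling `scope` cancels collection.
fun observeVlmStream(scope: CoroutineScope, vlm: VlmWrapper) {
    vlm.generateStreamFlow(prompt = "prompt", config = GenerationConfig())
        .onEach { result ->
            result.onSuccess { chunk ->
                print(chunk)                      // show partial output as it arrives
            }.onFailure { exception ->
                println("Streaming failed: ${exception.message}")
            }
        }
        .launchIn(scope)
}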