Local LLM support

This commit is contained in:
gcw_4spBpAfv
2026-03-05 13:55:57 +08:00
parent 1701ecfb7f
commit bd07a7526a
43 changed files with 4258 additions and 115 deletions

View File

@@ -2,20 +2,37 @@ package com.digitalperson
import android.Manifest
import android.content.pm.PackageManager
import android.graphics.Bitmap
import android.os.Bundle
import android.util.Log
import android.widget.Toast
import androidx.camera.core.CameraSelector
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.ImageProxy
import androidx.camera.core.Preview
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.camera.view.PreviewView
import androidx.appcompat.app.AppCompatActivity
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import com.digitalperson.cloud.CloudApiManager
import com.digitalperson.audio.AudioProcessor
import com.digitalperson.vad.VadManager
import com.digitalperson.asr.AsrManager
import com.digitalperson.tts.TtsManager
import com.digitalperson.ui.Live2DUiManager
import com.digitalperson.config.AppConfig
import com.digitalperson.face.FaceDetectionPipeline
import com.digitalperson.face.FaceOverlayView
import com.digitalperson.face.ImageProxyBitmapConverter
import com.digitalperson.metrics.TraceManager
import com.digitalperson.metrics.TraceSession
import com.digitalperson.tts.TtsController
import com.digitalperson.llm.LLMManager
import com.digitalperson.llm.LLMManagerCallback
import com.digitalperson.util.FileHelper
import java.io.File
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.Job
@@ -26,14 +43,24 @@ import kotlinx.coroutines.launch
import kotlinx.coroutines.withContext
class Live2DChatActivity : AppCompatActivity() {
companion object {
private const val TAG_ACTIVITY = "Live2DChatActivity"
private const val TAG_LLM = "LLM_ROUTE"
}
private lateinit var uiManager: Live2DUiManager
private lateinit var vadManager: VadManager
private lateinit var asrManager: AsrManager
private lateinit var ttsManager: TtsManager
private lateinit var ttsController: TtsController
private lateinit var audioProcessor: AudioProcessor
private var llmManager: LLMManager? = null
private var useLocalLLM = false // 默认使用云端 LLM
private val permissions: Array<String> = arrayOf(Manifest.permission.RECORD_AUDIO)
private val appPermissions: Array<String> = arrayOf(
Manifest.permission.RECORD_AUDIO,
Manifest.permission.CAMERA
)
private val micPermissions: Array<String> = arrayOf(Manifest.permission.RECORD_AUDIO)
@Volatile
private var isRecording: Boolean = false
@@ -55,23 +82,46 @@ class Live2DChatActivity : AppCompatActivity() {
@Volatile private var llmInFlight: Boolean = false
private var enableStreaming = false
private lateinit var cameraPreviewView: PreviewView
private lateinit var faceOverlayView: FaceOverlayView
private lateinit var faceDetectionPipeline: FaceDetectionPipeline
private var facePipelineReady: Boolean = false
private var cameraProvider: ProcessCameraProvider? = null
private lateinit var cameraAnalyzerExecutor: ExecutorService
override fun onRequestPermissionsResult(
requestCode: Int,
permissions: Array<String>,
grantResults: IntArray
) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults)
val ok = requestCode == AppConfig.REQUEST_RECORD_AUDIO_PERMISSION &&
grantResults.isNotEmpty() &&
grantResults[0] == PackageManager.PERMISSION_GRANTED
if (!ok) {
if (requestCode != AppConfig.REQUEST_RECORD_AUDIO_PERMISSION) return
if (grantResults.isEmpty()) {
finish()
return
}
val granted = permissions.zip(grantResults.toTypedArray()).associate { it.first to it.second }
val micGranted = granted[Manifest.permission.RECORD_AUDIO] == PackageManager.PERMISSION_GRANTED
val cameraGranted = granted[Manifest.permission.CAMERA] == PackageManager.PERMISSION_GRANTED
if (!micGranted) {
Log.e(AppConfig.TAG, "Audio record is disallowed")
finish()
return
}
if (!cameraGranted) {
uiManager.showToast("未授予相机权限,暂不启用人脸检测")
Log.w(AppConfig.TAG, "Camera permission denied")
return
}
if (facePipelineReady) {
startCameraPreviewAndDetection()
}
}
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
Log.i(TAG_ACTIVITY, "onCreate")
setContentView(R.layout.activity_live2d_chat)
uiManager = Live2DUiManager(this)
@@ -82,10 +132,28 @@ class Live2DChatActivity : AppCompatActivity() {
stopButtonId = R.id.stop_button,
recordButtonId = R.id.record_button,
traditionalButtonsId = R.id.traditional_buttons,
llmModeSwitchId = R.id.llm_mode_switch,
llmModeSwitchRowId = R.id.llm_mode_switch_row,
silentPlayerViewId = 0,
speakingPlayerViewId = 0,
live2dViewId = R.id.live2d_view
)
cameraPreviewView = findViewById(R.id.camera_preview)
cameraPreviewView.implementationMode = PreviewView.ImplementationMode.COMPATIBLE
faceOverlayView = findViewById(R.id.face_overlay)
cameraAnalyzerExecutor = Executors.newSingleThreadExecutor()
faceDetectionPipeline = FaceDetectionPipeline(
context = applicationContext,
onResult = { result ->
faceOverlayView.updateResult(result)
},
onGreeting = { greeting ->
uiManager.appendToUi("\n[Face] $greeting\n")
ttsController.enqueueSegment(greeting)
ttsController.enqueueEnd()
}
)
// 根据配置选择交互方式
uiManager.setUseHoldToSpeak(AppConfig.USE_HOLD_TO_SPEAK)
@@ -105,7 +173,7 @@ class Live2DChatActivity : AppCompatActivity() {
uiManager.setStopButtonListener { onStopClicked(userInitiated = true) }
}
ActivityCompat.requestPermissions(this, permissions, AppConfig.REQUEST_RECORD_AUDIO_PERMISSION)
ActivityCompat.requestPermissions(this, appPermissions, AppConfig.REQUEST_RECORD_AUDIO_PERMISSION)
try {
val streamingSwitch = findViewById<android.widget.Switch>(R.id.streaming_switch)
@@ -119,6 +187,27 @@ class Live2DChatActivity : AppCompatActivity() {
Log.w(AppConfig.TAG, "Streaming switch not found in layout: ${e.message}")
}
try {
val ttsModeSwitch = findViewById<android.widget.Switch>(R.id.tts_mode_switch)
ttsModeSwitch.isChecked = false // 默认使用本地TTS
ttsModeSwitch.setOnCheckedChangeListener { _, isChecked ->
ttsController.setUseQCloudTts(isChecked)
uiManager.showToast("TTS模式已切换到${if (isChecked) "腾讯云" else "本地"}")
}
} catch (e: Exception) {
Log.w(AppConfig.TAG, "TTS mode switch not found in layout: ${e.message}")
}
// 设置 LLM 模式开关
uiManager.setLLMSwitchListener { isChecked ->
useLocalLLM = isChecked
Log.i(TAG_LLM, "LLM mode switched: useLocalLLM=$useLocalLLM")
uiManager.showToast("LLM模式已切换到${if (isChecked) "本地" else "云端"}")
// 重新初始化 LLM
initLLM()
}
// 默认不显示 LLM 开关,等模型下载完成后再显示
if (AppConfig.USE_HOLD_TO_SPEAK) {
uiManager.setButtonsEnabled(recordEnabled = false)
} else {
@@ -127,8 +216,8 @@ class Live2DChatActivity : AppCompatActivity() {
uiManager.setText("初始化中…")
audioProcessor = AudioProcessor(this)
ttsManager = TtsManager(this)
ttsManager.setCallback(createTtsCallback())
ttsController = TtsController(this)
ttsController.setCallback(createTtsCallback())
asrManager = AsrManager(this)
asrManager.setAudioProcessor(audioProcessor)
@@ -137,6 +226,64 @@ class Live2DChatActivity : AppCompatActivity() {
vadManager = VadManager(this)
vadManager.setCallback(createVadCallback())
// 初始化 LLM 管理器
initLLM()
// 检查是否需要下载模型
if (!FileHelper.isLocalLLMAvailable(this)) {
// 显示下载进度对话框
uiManager.showDownloadProgressDialog()
// 异步下载模型文件
FileHelper.downloadModelFilesWithProgress(
this,
onProgress = { fileName, downloaded, total, progress ->
runOnUiThread {
val downloadedMB = downloaded / (1024 * 1024)
val totalMB = total / (1024 * 1024)
uiManager.updateDownloadProgress(
fileName,
downloadedMB,
totalMB,
progress
)
}
},
onComplete = { success, message ->
runOnUiThread {
uiManager.dismissDownloadProgressDialog()
if (success) {
Log.i(AppConfig.TAG, "Model files downloaded successfully")
uiManager.showToast("模型下载完成", Toast.LENGTH_SHORT)
// 检查本地 LLM 是否可用
if (FileHelper.isLocalLLMAvailable(this)) {
Log.i(AppConfig.TAG, "Local LLM is available, enabling local LLM switch")
// 显示本地 LLM 开关,并同步状态
uiManager.showLLMSwitch(true)
uiManager.setLLMSwitchChecked(useLocalLLM)
}
} else {
Log.e(AppConfig.TAG, "Failed to download model files: $message")
uiManager.showToast("模型下载失败: $message", Toast.LENGTH_LONG)
}
// 下载完成后初始化其他组件
initializeOtherComponents()
}
}
)
} else {
// 模型已存在,直接初始化其他组件
initializeOtherComponents()
// 显示本地 LLM 开关,并同步状态
uiManager.showLLMSwitch(true)
uiManager.setLLMSwitchChecked(useLocalLLM)
}
}
/**
* 初始化其他组件VAD、ASR、TTS、人脸检测等
*/
private fun initializeOtherComponents() {
ioScope.launch {
try {
Log.i(AppConfig.TAG, "Init VAD + SenseVoice(RKNN) + TTS (background)")
@@ -144,7 +291,8 @@ class Live2DChatActivity : AppCompatActivity() {
vadManager.initVadModel()
asrManager.initSenseVoiceModel()
}
val ttsOk = ttsManager.initTtsAndAudioTrack()
val ttsOk = ttsController.init()
facePipelineReady = faceDetectionPipeline.initialize()
withContext(Dispatchers.Main) {
if (!ttsOk) {
uiManager.showToast(
@@ -152,6 +300,11 @@ class Live2DChatActivity : AppCompatActivity() {
Toast.LENGTH_LONG
)
}
if (!facePipelineReady) {
uiManager.showToast("RetinaFace 初始化失败,请检查模型和 rknn 运行库", Toast.LENGTH_LONG)
} else if (allPermissionsGranted()) {
startCameraPreviewAndDetection()
}
uiManager.setText(getString(R.string.hint))
if (AppConfig.USE_HOLD_TO_SPEAK) {
uiManager.setButtonsEnabled(recordEnabled = true)
@@ -203,14 +356,22 @@ class Live2DChatActivity : AppCompatActivity() {
Log.d(AppConfig.TAG, "ASR segment skipped: $reason")
}
override fun shouldSkipAsr(): Boolean = ttsManager.isPlaying()
override fun shouldSkipAsr(): Boolean = ttsController.isPlaying()
override fun isLlmInFlight(): Boolean = llmInFlight
override fun onLlmCalled(text: String) {
llmInFlight = true
Log.d(AppConfig.TAG, "Calling LLM with text: $text")
cloudApiManager.callLLM(text)
if (useLocalLLM) {
Log.i(TAG_LLM, "Routing to LOCAL LLM")
// 使用本地 LLM 生成回复
generateResponse(text)
} else {
Log.i(TAG_LLM, "Routing to CLOUD LLM")
// 使用云端 LLM 生成回复
cloudApiManager.callLLM(text)
}
}
}
@@ -220,7 +381,7 @@ class Live2DChatActivity : AppCompatActivity() {
asrManager.enqueueAudioSegment(originalAudio, processedAudio)
}
override fun shouldSkipProcessing(): Boolean = ttsManager.isPlaying() || llmInFlight
override fun shouldSkipProcessing(): Boolean = ttsController.isPlaying() || llmInFlight
}
private fun createCloudApiListener() = object : CloudApiManager.CloudApiListener {
@@ -232,9 +393,9 @@ class Live2DChatActivity : AppCompatActivity() {
if (enableStreaming) {
for (seg in segmenter.flush()) {
ttsManager.enqueueSegment(seg)
ttsController.enqueueSegment(seg)
}
ttsManager.enqueueEnd()
ttsController.enqueueEnd()
} else {
val previousMood = com.digitalperson.mood.MoodManager.getCurrentMood()
val (filteredText, mood) = com.digitalperson.mood.MoodManager.extractAndFilterMood(response)
@@ -247,8 +408,8 @@ class Live2DChatActivity : AppCompatActivity() {
runOnUiThread {
uiManager.appendToUi("${filteredText}\n")
}
ttsManager.enqueueSegment(filteredText)
ttsManager.enqueueEnd()
ttsController.enqueueSegment(filteredText)
ttsController.enqueueEnd()
}
}
@@ -271,7 +432,7 @@ class Live2DChatActivity : AppCompatActivity() {
val segments = segmenter.processChunk(filteredText)
for (seg in segments) {
ttsManager.enqueueSegment(seg)
ttsController.enqueueSegment(seg)
}
}
}
@@ -285,7 +446,7 @@ class Live2DChatActivity : AppCompatActivity() {
}
}
private fun createTtsCallback() = object : TtsManager.TtsCallback {
private fun createTtsCallback() = object : TtsController.TtsCallback {
override fun onTtsStarted(text: String) {
runOnUiThread {
uiManager.appendToUi("\n[TTS] 开始合成...\n")
@@ -310,32 +471,6 @@ class Live2DChatActivity : AppCompatActivity() {
uiManager.setSpeaking(speaking)
}
override fun getCurrentTrace(): TraceSession? = currentTrace
override fun onTraceMarkTtsRequestEnqueued() {
currentTrace?.markTtsRequestEnqueued()
}
override fun onTraceMarkTtsSynthesisStart() {
currentTrace?.markTtsSynthesisStart()
}
override fun onTraceMarkTtsFirstPcmReady() {
currentTrace?.markTtsFirstPcmReady()
}
override fun onTraceMarkTtsFirstAudioPlay() {
currentTrace?.markTtsFirstAudioPlay()
}
override fun onTraceMarkTtsDone() {
currentTrace?.markTtsDone()
}
override fun onTraceAddDuration(name: String, value: Long) {
currentTrace?.addDuration(name, value)
}
override fun onEndTurn() {
TraceManager.getInstance().endTurn()
currentTrace = null
@@ -344,27 +479,97 @@ class Live2DChatActivity : AppCompatActivity() {
override fun onDestroy() {
super.onDestroy()
stopCameraPreviewAndDetection()
onStopClicked(userInitiated = false)
ioScope.cancel()
synchronized(nativeLock) {
try { vadManager.release() } catch (_: Throwable) {}
try { asrManager.release() } catch (_: Throwable) {}
}
try { ttsManager.release() } catch (_: Throwable) {}
try { faceDetectionPipeline.release() } catch (_: Throwable) {}
try { cameraAnalyzerExecutor.shutdown() } catch (_: Throwable) {}
try { ttsController.release() } catch (_: Throwable) {}
try { llmManager?.destroy() } catch (_: Throwable) {}
try { uiManager.release() } catch (_: Throwable) {}
try { audioProcessor.release() } catch (_: Throwable) {}
}
// Resume UI rendering; restart camera-based face detection only when the
// pipeline initialized successfully and mic + camera permissions are granted.
override fun onResume() {
    super.onResume()
    Log.i(TAG_ACTIVITY, "onResume")
    uiManager.onResume()
    if (facePipelineReady && allPermissionsGranted()) {
        startCameraPreviewAndDetection()
    }
}
// Release the camera before pausing the UI so no frames are analyzed while the
// activity is in the background; super.onPause() is deliberately called last.
override fun onPause() {
    Log.i(TAG_ACTIVITY, "onPause")
    stopCameraPreviewAndDetection()
    uiManager.onPause()
    super.onPause()
}
/** Returns true when every permission in [appPermissions] (mic + camera) is granted. */
private fun allPermissionsGranted(): Boolean =
    appPermissions.none { permission ->
        ContextCompat.checkSelfPermission(this, permission) != PackageManager.PERMISSION_GRANTED
    }
/**
 * Binds the front camera to this activity's lifecycle: a Preview surface for
 * [cameraPreviewView] plus an ImageAnalysis stream feeding [analyzeCameraFrame].
 * Safe to call repeatedly — any previously bound use cases are unbound first.
 */
private fun startCameraPreviewAndDetection() {
    val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
    // The listener runs on the main executor so all CameraX binding happens on
    // the main thread, as bindToLifecycle requires.
    cameraProviderFuture.addListener({
        try {
            val provider = cameraProviderFuture.get()
            cameraProvider = provider
            // Drop any existing bindings first so a repeated call restarts cleanly.
            provider.unbindAll()
            val preview = Preview.Builder().build().apply {
                setSurfaceProvider(cameraPreviewView.surfaceProvider)
            }
            cameraPreviewView.scaleType = PreviewView.ScaleType.FIT_CENTER
            // KEEP_ONLY_LATEST: stale frames are dropped when analysis falls behind.
            val analyzer = ImageAnalysis.Builder()
                .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
                .build()
            analyzer.setAnalyzer(cameraAnalyzerExecutor) { imageProxy ->
                analyzeCameraFrame(imageProxy)
            }
            val selector = CameraSelector.Builder()
                .requireLensFacing(CameraSelector.LENS_FACING_FRONT)
                .build()
            provider.bindToLifecycle(this, selector, preview, analyzer)
        } catch (t: Throwable) {
            Log.e(AppConfig.TAG, "startCameraPreviewAndDetection failed: ${t.message}", t)
        }
    }, ContextCompat.getMainExecutor(this))
}
/**
 * Unbinds all camera use cases and clears the cached provider reference.
 * Unbind errors are swallowed; the reference is cleared unconditionally.
 */
private fun stopCameraPreviewAndDetection() {
    val provider = cameraProvider
    cameraProvider = null
    if (provider != null) {
        runCatching { provider.unbindAll() }
    }
}
/**
 * Converts one camera frame to a Bitmap and hands it to the face pipeline.
 * The ImageProxy is always closed — even on conversion failure — so the
 * analyzer keeps receiving frames.
 */
private fun analyzeCameraFrame(imageProxy: ImageProxy) {
    try {
        ImageProxyBitmapConverter.toBitmap(imageProxy)?.let { frame ->
            faceDetectionPipeline.submitFrame(frame)
        }
    } catch (t: Throwable) {
        Log.w(AppConfig.TAG, "analyzeCameraFrame error: ${t.message}")
    } finally {
        imageProxy.close()
    }
}
private fun onStartClicked() {
Log.d(AppConfig.TAG, "onStartClicked called")
if (isRecording) {
@@ -372,7 +577,7 @@ class Live2DChatActivity : AppCompatActivity() {
return
}
if (!audioProcessor.initMicrophone(permissions, AppConfig.REQUEST_RECORD_AUDIO_PERMISSION)) {
if (!audioProcessor.initMicrophone(micPermissions, AppConfig.REQUEST_RECORD_AUDIO_PERMISSION)) {
uiManager.showToast("麦克风初始化失败/无权限")
return
}
@@ -383,8 +588,7 @@ class Live2DChatActivity : AppCompatActivity() {
uiManager.clearText()
ttsManager.reset()
ttsManager.setCurrentTrace(currentTrace)
ttsController.reset()
segmenter.reset()
vadManager.reset()
@@ -409,12 +613,12 @@ class Live2DChatActivity : AppCompatActivity() {
}
// 如果TTS正在播放打断它
val interrupted = ttsManager.interruptForNewTurn()
val interrupted = ttsController.interruptForNewTurn()
if (interrupted) {
uiManager.appendToUi("\n[LOG] 已打断TTS播放\n")
}
if (!audioProcessor.initMicrophone(permissions, AppConfig.REQUEST_RECORD_AUDIO_PERMISSION)) {
if (!audioProcessor.initMicrophone(micPermissions, AppConfig.REQUEST_RECORD_AUDIO_PERMISSION)) {
uiManager.showToast("麦克风初始化失败/无权限")
return
}
@@ -427,7 +631,7 @@ class Live2DChatActivity : AppCompatActivity() {
// interruptForNewTurn() already prepared TTS state for next turn.
// Keep reset() only for non-interrupt entry points.
ttsManager.setCurrentTrace(currentTrace)
segmenter.reset()
// 启动按住说话的动作
@@ -479,7 +683,7 @@ class Live2DChatActivity : AppCompatActivity() {
recordingJob?.cancel()
recordingJob = null
ttsManager.stop()
ttsController.stop()
if (AppConfig.USE_HOLD_TO_SPEAK) {
uiManager.setButtonsEnabled(recordEnabled = true)
@@ -515,10 +719,10 @@ class Live2DChatActivity : AppCompatActivity() {
while (isRecording && ioScope.coroutineContext.isActive) {
loopCount++
if (loopCount % 100 == 0) {
Log.d(AppConfig.TAG, "processSamplesLoop running, loopCount=$loopCount, ttsPlaying=${ttsManager.isPlaying()}")
Log.d(AppConfig.TAG, "processSamplesLoop running, loopCount=$loopCount, ttsPlaying=${ttsController.isPlaying()}")
}
if (ttsManager.isPlaying()) {
if (ttsController.isPlaying()) {
if (vadManager.isInSpeech()) {
Log.d(AppConfig.TAG, "TTS playing, resetting VAD state")
vadManager.clearState()
@@ -546,11 +750,134 @@ class Live2DChatActivity : AppCompatActivity() {
}
val forced = segmenter.maybeForceByTime()
for (seg in forced) ttsManager.enqueueSegment(seg)
for (seg in forced) ttsController.enqueueSegment(seg)
}
vadManager.forceFinalize()
}
Log.d(AppConfig.TAG, "processSamplesLoop stopped")
}
/**
* 初始化 LLM 管理器
*/
/**
 * (Re)initializes the LLM backend according to [useLocalLLM].
 *
 * Any existing local manager is destroyed first. In local mode a new
 * [LLMManager] is created over the on-disk RKLLM model file; in cloud mode no
 * local resources are held. On any failure the activity falls back to the
 * cloud backend and resets the UI switch to reflect that.
 */
private fun initLLM() {
    try {
        Log.i(TAG_LLM, "initLLM called, useLocalLLM=$useLocalLLM")
        // Tear down any previous local engine before switching modes.
        llmManager?.destroy()
        llmManager = null
        if (useLocalLLM) {
            // NOTE(review): code that paused the camera and released the face
            // pipeline / VAD before local-LLM init used to live here and is
            // currently disabled.
            val modelPath = FileHelper.getLLMModelPath(applicationContext)
            if (!File(modelPath).exists()) {
                throw IllegalStateException("RKLLM model file missing: $modelPath")
            }
            Log.i(AppConfig.TAG, "Initializing LLM with model path: $modelPath")
            // Accumulates streamed tokens until the finished callback fires.
            val localLlmResponseBuffer = StringBuilder()
            llmManager = LLMManager(modelPath, object : LLMManagerCallback {
                override fun onThinking(msg: String, finished: Boolean) {
                    // Intermediate "thinking" output; surfaced only in streaming mode.
                    Log.d(TAG_LLM, "LOCAL onThinking finished=$finished msg=${msg.take(60)}")
                    runOnUiThread {
                        if (!finished && enableStreaming) {
                            uiManager.appendToUi("\n[LLM] 思考中: $msg\n")
                        }
                    }
                }
                override fun onResult(msg: String, finished: Boolean) {
                    // Generation output: buffer partial chunks; on completion flush
                    // the accumulated text to the UI and enqueue it for TTS.
                    Log.d(TAG_LLM, "LOCAL onResult finished=$finished len=${msg.length}")
                    runOnUiThread {
                        if (!finished) {
                            localLlmResponseBuffer.append(msg)
                            if (enableStreaming) {
                                uiManager.appendToUi(msg)
                            }
                        } else {
                            val finalText = localLlmResponseBuffer.toString().trim()
                            localLlmResponseBuffer.setLength(0)
                            if (!enableStreaming && finalText.isNotEmpty()) {
                                uiManager.appendToUi("$finalText\n")
                            }
                            uiManager.appendToUi("\n\n[LLM] 生成完成\n")
                            // The LLM turn is complete; allow new input to route.
                            llmInFlight = false
                            if (finalText.isNotEmpty()) {
                                ttsController.enqueueSegment(finalText)
                                ttsController.enqueueEnd()
                            } else {
                                Log.w(TAG_LLM, "LOCAL final text is empty, skip TTS enqueue")
                            }
                        }
                    }
                }
            })
            Log.i(AppConfig.TAG, "LLM initialized successfully")
            Log.i(TAG_LLM, "LOCAL LLM initialized")
        } else {
            // Cloud mode: nothing to initialize locally.
            Log.i(AppConfig.TAG, "Using cloud LLM, skipping local LLM initialization")
            Log.i(TAG_LLM, "CLOUD mode active")
        }
    } catch (e: Exception) {
        Log.e(AppConfig.TAG, "Failed to initialize LLM: ${e.message}", e)
        Log.e(TAG_LLM, "LOCAL init failed: ${e.message}", e)
        // Fall back to cloud mode and reflect that in the UI switch.
        useLocalLLM = false
        runOnUiThread {
            uiManager.setLLMSwitchChecked(false)
            uiManager.showToast("LLM 初始化失败: ${e.message}", Toast.LENGTH_LONG)
            uiManager.appendToUi("\n[错误] LLM 初始化失败: ${e.message}\n")
        }
    }
}
/**
* 使用 LLM 生成回复
*/
/**
 * Routes a user utterance to whichever LLM backend is active.
 * Local mode prepends a fixed system prompt; if the local manager is missing
 * it falls back to the cloud API. Any failure is logged, shown in the UI,
 * and clears the in-flight flag.
 */
private fun generateResponse(userInput: String) {
    try {
        if (!useLocalLLM) {
            // Cloud route.
            Log.d(AppConfig.TAG, "Using cloud LLM for response: $userInput")
            Log.i(TAG_LLM, "CLOUD callLLM")
            cloudApiManager.callLLM(userInput)
            return
        }
        val systemPrompt = "你是一个友好的数字人助手,回答要简洁明了。"
        Log.d(AppConfig.TAG, "Generating response for: $userInput")
        val local = llmManager ?: run {
            // Local engine unavailable — degrade gracefully to the cloud API.
            Log.e(TAG_LLM, "LOCAL LLM manager is null, fallback to CLOUD")
            cloudApiManager.callLLM(userInput)
            return
        }
        Log.i(TAG_LLM, "LOCAL generateResponseWithSystem")
        local.generateResponseWithSystem(systemPrompt, userInput)
    } catch (e: Exception) {
        Log.e(AppConfig.TAG, "Failed to generate response: ${e.message}", e)
        Log.e(TAG_LLM, "generateResponse failed: ${e.message}", e)
        runOnUiThread {
            uiManager.appendToUi("\n\n[Error] LLM 生成失败: ${e.message}\n")
            llmInFlight = false
        }
    }
}
}