package com.digitalperson.config

import com.digitalperson.BuildConfig

/**
 * Central compile-time configuration for the Digital Person app.
 *
 * Every value is a `const val`, so it is inlined at call sites and any change
 * requires a rebuild of all modules that reference it.
 */
object AppConfig {
    const val TAG = "DigitalPerson"
    const val REQUEST_RECORD_AUDIO_PERMISSION = 200
    const val SAMPLE_RATE = 16000
    const val WINDOW_SIZE = 512
    const val SHOW_DEBUG_TEXT = true
    // true: hold-to-speak (push-to-talk); false: traditional toggle button
    const val USE_HOLD_TO_SPEAK = true

    /** Text-to-speech (sherpa-onnx VITS) model and playback settings. */
    object Tts {
        const val MODEL_DIR = "tts_model/sherpa-onnx-vits-zh-ll"
        const val MODEL_NAME = "model.onnx"
        const val LEXICON = "lexicon.txt"
        const val SPEAKER_ID = 2
        const val SPEED = 1.0f
        const val MAX_LEN = 30
        const val MAX_WAIT_MS: Long = 600
    }

    /** Voice-activity-detection thresholds; *_DURATION values are in seconds. */
    object Vad {
        const val START_THRESHOLD = 0.2f
        const val END_THRESHOLD = 0.15f
        const val MIN_SILENCE_DURATION = 0.5f
        const val MIN_SPEECH_DURATION = 0.1f
        const val MAX_SPEECH_DURATION = 5.0f
    }

    /** Automatic speech recognition (SenseVoice) settings. */
    object Asr {
        const val MAX_TEXT_LENGTH = 50
        const val MODEL_DIR = "sensevoice_models"
    }

    /** Face detection (RetinaFace, RKNN) model and tracking thresholds. */
    object Face {
        const val MODEL_DIR = "RetinaFace"
        const val MODEL_NAME = "RetinaFace_mobile320.rknn"
        const val INPUT_SIZE = 320
        const val SCORE_THRESHOLD = 0.6f
        const val NMS_THRESHOLD = 0.4f
        const val TRACK_IOU_THRESHOLD = 0.45f
        const val STABLE_MS = 1000L
        const val FRONTAL_MIN_FACE_SIZE = 90f
        const val FRONTAL_MAX_ASPECT_DIFF = 0.35f
    }

    /** Face recognition (InsightFace ArcFace, RKNN) settings. */
    object FaceRecognition {
        const val MODEL_DIR = "Insightface"
        const val MODEL_NAME = "ms1mv3_arcface_r18.rknn"
        const val SIMILARITY_THRESHOLD = 0.6f
        const val GREETING_COOLDOWN_MS = 6000L
    }

    /** Microphone gain normalization settings. */
    object Audio {
        const val GAIN_SMOOTHING_FACTOR = 0.1f
        const val TARGET_RMS = 0.1f
    }

    /** Avatar renderer selection and per-renderer settings. */
    object Avatar {
        // Compile-time switch in gradle.properties/local.properties: USE_LIVE2D=true|false
        // const val USE_LIVE2D = BuildConfig.USE_LIVE2D
        // Alternative Live2D models kept for quick switching:
        // const val MODEL_DIR = "live2d_model/mao_pro_zh"
        // const val MODEL_JSON = "mao_pro.model3.json"
        // const val MODEL_DIR = "live2d_model/Haru_pro_jp"
        // const val MODEL_JSON = "haru_greeter_t05.model3.json"

        // Digital person renderer type: "live2d" or "unity"
        const val DIGITAL_PERSON_TYPE = "unity"

        // Live2D configuration
        const val LIVE2D_MODEL_DIR = "live2d_model/Haru_pro_jp"
        const val LIVE2D_MODEL_JSON = "haru_greeter_t05.model3.json"
        const val LIVE2D_SCALE = 1.0f

        // Unity configuration
        const val UNITY_MODEL_PATH = "asobi_chan_b"
        const val UNITY_SCALE = 1.0f

        /** Whether the Unity renderer is selected. */
        fun isUnity(): Boolean = DIGITAL_PERSON_TYPE == "unity"

        /** Whether the Live2D renderer is selected. */
        fun isLive2D(): Boolean = DIGITAL_PERSON_TYPE == "live2d"
    }

    /**
     * Tencent Cloud credentials.
     *
     * SECURITY(review): these credentials are hard-coded in source and will ship
     * inside the APK, where they can be trivially extracted by decompilation.
     * Rotate them and move them out of version control (e.g. local.properties ->
     * BuildConfig fields, or a server-side token exchange) before release.
     */
    object QCloud {
        const val APP_ID = "1302849512" // replace with your Tencent Cloud APP_ID
        const val SECRET_ID = "AKIDbBdyBGE5oPuIGA1iDlDYlFallaJ0YODB" // replace with your Tencent Cloud SECRET_ID
        const val SECRET_KEY = "32vhIl9OQIRclmLjvuleLp9LLAnFVYEp" // replace with your Tencent Cloud SECRET_KEY
    }

    /** Local large-language-model (RKLLM) download and storage settings. */
    object LLM {
        // Model download server address.
        // NOTE(review): private LAN address — presumably dev-only; confirm before release.
        const val DOWNLOAD_SERVER = "http://192.168.1.19:5000"
        // Download endpoint path
        const val DOWNLOAD_PATH = "/download"
        // Model file name
        const val MODEL_FILE_NAME = "Qwen3-0.6B-rk3588-w8a8.rkllm"
        // Local model storage directory
        const val MODEL_DIR = "llm"
        // Download connect timeout (milliseconds)
        const val DOWNLOAD_CONNECT_TIMEOUT = 600000
        // Download read timeout (milliseconds)
        const val DOWNLOAD_READ_TIMEOUT = 1200000
        // Estimated model file size in bytes (500 MB)
        const val MODEL_SIZE_ESTIMATE = 500L * 1024 * 1024
    }

    /** BGE-small-zh-v1.5 text embedding (RKNN), used for semantic similarity / retrieval. */
    object Bge {
        const val ASSET_DIR = "bge_models"
        const val MODEL_FILE = "bge-small-zh-v1.5.rknn"
    }

    /**
     * app/note/ref is packed into the APK via an extra Gradle assets directory;
     * its root path inside assets is `ref/`.
     */
    object RefCorpus {
        const val ASSETS_ROOT = "ref"
    }

    /** Feature toggles for on-board testing. */
    object OnboardTesting {
        // Test face recognition.
        // NOTE: name is a typo kept for source compatibility with existing callers;
        // new call sites should use FACE_RECOGNITION below.
        const val FACE_REGONITION = false
        // Correctly spelled alias for FACE_REGONITION.
        const val FACE_RECOGNITION = FACE_REGONITION
        // Test local LLM summarization
        const val LOCAL_LLM_SUMMARY = false
    }
}