
android: routine maintenance - Dec 2025 (#18338)

* Fix `msg` typo

* Fix thread safety in destroy() to support aborting generation from lifecycle callbacks.

* UI polish: stack new messages from the bottom of the list; fix GGUF metadata text being clipped out of the viewport by its margin.

* Bug fixes: rare race condition when the main thread updated the view while a default-dispatcher thread updated the messages at the same time; user input not disabled during generation.

* Bump dependency versions; remove deprecated Gradle DSL usage.
Naco Siren
commit c1366056f6

+ 2 - 5
examples/llama.android/app/build.gradle.kts

@@ -41,11 +41,8 @@ android {
         }
     }
     compileOptions {
-        sourceCompatibility = JavaVersion.VERSION_1_8
-        targetCompatibility = JavaVersion.VERSION_1_8
-    }
-    kotlinOptions {
-        jvmTarget = "1.8"
+        sourceCompatibility = JavaVersion.VERSION_17
+        targetCompatibility = JavaVersion.VERSION_17
     }
 }
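The deleted `kotlinOptions` block is the outdated DSL the commit message refers to; it is deprecated in recent Kotlin Gradle plugin versions. If a module still needs an explicit Kotlin JVM target instead of relying on defaults, a minimal sketch of the non-deprecated replacement looks like this (assuming Kotlin Gradle plugin 2.x):

```kotlin
// build.gradle.kts — sketch of the modern replacement for kotlinOptions
import org.jetbrains.kotlin.gradle.dsl.JvmTarget

kotlin {
    compilerOptions {
        // Keep in sync with compileOptions' Java 17 target to avoid
        // "Inconsistent JVM-target compatibility" build errors.
        jvmTarget.set(JvmTarget.JVM_17)
    }
}
```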
 

+ 30 - 12
examples/llama.android/app/src/main/java/com/example/llama/MainActivity.kt

@@ -6,6 +6,7 @@ import android.util.Log
 import android.widget.EditText
 import android.widget.TextView
 import android.widget.Toast
+import androidx.activity.addCallback
 import androidx.activity.enableEdgeToEdge
 import androidx.activity.result.contract.ActivityResultContracts
 import androidx.appcompat.app.AppCompatActivity
@@ -18,6 +19,7 @@ import com.arm.aichat.gguf.GgufMetadata
 import com.arm.aichat.gguf.GgufMetadataReader
 import com.google.android.material.floatingactionbutton.FloatingActionButton
 import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.Job
 import kotlinx.coroutines.flow.onCompletion
 import kotlinx.coroutines.launch
 import kotlinx.coroutines.withContext
@@ -36,6 +38,7 @@ class MainActivity : AppCompatActivity() {
 
     // Arm AI Chat inference engine
     private lateinit var engine: InferenceEngine
+    private var generationJob: Job? = null
 
     // Conversation states
     private var isModelReady = false
@@ -47,11 +50,13 @@ class MainActivity : AppCompatActivity() {
         super.onCreate(savedInstanceState)
         enableEdgeToEdge()
         setContentView(R.layout.activity_main)
+        // View model boilerplate and state management are out of scope for this basic sample
+        onBackPressedDispatcher.addCallback { Log.w(TAG, "Ignore back press for simplicity") }
 
         // Find views
         ggufTv = findViewById(R.id.gguf)
         messagesRv = findViewById(R.id.messages)
-        messagesRv.layoutManager = LinearLayoutManager(this)
+        messagesRv.layoutManager = LinearLayoutManager(this).apply { stackFromEnd = true }
         messagesRv.adapter = messageAdapter
         userInputEt = findViewById(R.id.user_input)
         userActionFab = findViewById(R.id.fab)
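The `addCallback` added above intentionally swallows every back press to keep the sample small. As a hedged sketch (the condition and wiring are illustrative, not part of this diff), a fuller app might enable the callback only while work is in flight:

```kotlin
// Sketch: intercept back only while a generation is running.
val backCallback = onBackPressedDispatcher.addCallback(this) {
    generationJob?.cancel()  // stop collecting tokens
    isEnabled = false        // let the next back press behave normally
}
// Toggle backCallback.isEnabled as generation starts and finishes.
```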
@@ -157,33 +162,35 @@ class MainActivity : AppCompatActivity() {
      * Validate and send the user message into [InferenceEngine]
      */
     private fun handleUserInput() {
-        userInputEt.text.toString().also { userSsg ->
-            if (userSsg.isEmpty()) {
+        userInputEt.text.toString().also { userMsg ->
+            if (userMsg.isEmpty()) {
                 Toast.makeText(this, "Input message is empty!", Toast.LENGTH_SHORT).show()
             } else {
                 userInputEt.text = null
+                userInputEt.isEnabled = false
                 userActionFab.isEnabled = false
 
                 // Update message states
-                messages.add(Message(UUID.randomUUID().toString(), userSsg, true))
+                messages.add(Message(UUID.randomUUID().toString(), userMsg, true))
                 lastAssistantMsg.clear()
                 messages.add(Message(UUID.randomUUID().toString(), lastAssistantMsg.toString(), false))
 
-                lifecycleScope.launch(Dispatchers.Default) {
-                    engine.sendUserPrompt(userSsg)
+                generationJob = lifecycleScope.launch(Dispatchers.Default) {
+                    engine.sendUserPrompt(userMsg)
                         .onCompletion {
                             withContext(Dispatchers.Main) {
+                                userInputEt.isEnabled = true
                                 userActionFab.isEnabled = true
                             }
                         }.collect { token ->
-                            val messageCount = messages.size
-                            check(messageCount > 0 && !messages[messageCount - 1].isUser)
+                            withContext(Dispatchers.Main) {
+                                val messageCount = messages.size
+                                check(messageCount > 0 && !messages[messageCount - 1].isUser)
 
-                            messages.removeAt(messageCount - 1).copy(
-                                content = lastAssistantMsg.append(token).toString()
-                            ).let { messages.add(it) }
+                                messages.removeAt(messageCount - 1).copy(
+                                    content = lastAssistantMsg.append(token).toString()
+                                ).let { messages.add(it) }
 
-                            withContext(Dispatchers.Main) {
                                 messageAdapter.notifyItemChanged(messages.size - 1)
                             }
                         }
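The reshuffled `collect` body above is the race-condition fix from the commit message: the `messages` list backing the adapter is now mutated only inside `withContext(Dispatchers.Main)`, i.e. on the same thread from which RecyclerView reads it during layout. A minimal standalone sketch of the pattern (member names taken from this file; the helper itself is hypothetical):

```kotlin
// Sketch: confine adapter-backing data to the main thread, then notify.
private suspend fun appendToLastMessage(token: String) = withContext(Dispatchers.Main) {
    val last = messages.removeAt(messages.lastIndex)      // mutate on main thread only
    messages.add(last.copy(content = last.content + token))
    messageAdapter.notifyItemChanged(messages.lastIndex)  // notify on the same thread
}
```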
@@ -195,6 +202,7 @@ class MainActivity : AppCompatActivity() {
     /**
      * Run a benchmark with the model file
      */
+    @Deprecated("This benchmark doesn't accurately indicate GUI performance expected by app developers")
     private suspend fun runBenchmark(modelName: String, modelFile: File) =
         withContext(Dispatchers.Default) {
             Log.i(TAG, "Starts benchmarking $modelName")
@@ -223,6 +231,16 @@ class MainActivity : AppCompatActivity() {
             if (!it.exists()) { it.mkdir() }
         }
 
+    override fun onStop() {
+        generationJob?.cancel()
+        super.onStop()
+    }
+
+    override fun onDestroy() {
+        engine.destroy()
+        super.onDestroy()
+    }
+
     companion object {
         private val TAG = MainActivity::class.java.simpleName
 

+ 2 - 3
examples/llama.android/app/src/main/res/layout/activity_main.xml

@@ -24,7 +24,7 @@
                 android:id="@+id/gguf"
                 android:layout_width="match_parent"
                 android:layout_height="wrap_content"
-                android:layout_margin="16dp"
+                android:padding="16dp"
                 android:text="Selected GGUF model's metadata will show here."
                 style="@style/TextAppearance.MaterialComponents.Body2" />
 
@@ -33,8 +33,7 @@
         <com.google.android.material.divider.MaterialDivider
             android:layout_width="match_parent"
             android:layout_height="2dp"
-            android:layout_marginHorizontal="16dp"
-            android:layout_marginVertical="8dp" />
+            android:layout_marginHorizontal="16dp" />
 
         <androidx.recyclerview.widget.RecyclerView
             android:id="@+id/messages"

+ 4 - 4
examples/llama.android/gradle/libs.versions.toml

@@ -1,15 +1,15 @@
 [versions]
 
 # Plugins
-agp = "8.13.0"
-kotlin = "2.2.20"
+agp = "8.13.2"
+kotlin = "2.3.0"
 
 # AndroidX
-activity = "1.11.0"
+activity = "1.12.2"
 appcompat = "1.7.1"
 core-ktx = "1.17.0"
 constraint-layout = "2.2.1"
-datastore-preferences = "1.1.7"
+datastore-preferences = "1.2.0"
 
 # Material
 material = "1.13.0"

+ 1 - 1
examples/llama.android/lib/src/main/cpp/ai_chat.cpp

@@ -560,6 +560,6 @@ Java_com_arm_aichat_internal_InferenceEngineImpl_unload(JNIEnv * /*unused*/, job
 
 extern "C"
 JNIEXPORT void JNICALL
-Java_com_arm_aichat_internal_InferenceEngineImpl_shutdown(JNIEnv *env, jobject /*unused*/) {
+Java_com_arm_aichat_internal_InferenceEngineImpl_shutdown(JNIEnv *, jobject /*unused*/) {
     llama_backend_free();
 }

+ 1 - 1
examples/llama.android/lib/src/main/java/com/arm/aichat/InferenceEngine.kt

@@ -38,7 +38,7 @@ interface InferenceEngine {
     /**
      * Unloads the currently loaded model.
      */
-    suspend fun cleanUp()
+    fun cleanUp()
 
     /**
      * Cleans up resources when the engine is no longer needed.
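Dropping `suspend` here means callers no longer need a coroutine scope, which matters for lifecycle callbacks that run synchronously. A hedged usage sketch (this caller is hypothetical; the sample activity itself calls `destroy()`):

```kotlin
// Sketch: cleanUp() is now callable directly from a synchronous lifecycle callback.
override fun onDestroy() {
    engine.cleanUp()  // blocks internally on the engine's dispatcher; no launch needed
    super.onDestroy()
}
```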

+ 27 - 12
examples/llama.android/lib/src/main/java/com/arm/aichat/internal/InferenceEngineImpl.kt

@@ -15,9 +15,11 @@ import kotlinx.coroutines.cancel
 import kotlinx.coroutines.flow.Flow
 import kotlinx.coroutines.flow.MutableStateFlow
 import kotlinx.coroutines.flow.StateFlow
+import kotlinx.coroutines.flow.asStateFlow
 import kotlinx.coroutines.flow.flow
 import kotlinx.coroutines.flow.flowOn
 import kotlinx.coroutines.launch
+import kotlinx.coroutines.runBlocking
 import kotlinx.coroutines.withContext
 import java.io.File
 import java.io.IOException
@@ -109,9 +111,11 @@ internal class InferenceEngineImpl private constructor(
 
     private val _state =
         MutableStateFlow<InferenceEngine.State>(InferenceEngine.State.Uninitialized)
-    override val state: StateFlow<InferenceEngine.State> = _state
+    override val state: StateFlow<InferenceEngine.State> = _state.asStateFlow()
 
     private var _readyForSystemPrompt = false
+    @Volatile
+    private var _cancelGeneration = false
 
     /**
      * Single-threaded coroutine dispatcher & scope for LLama asynchronous operations
@@ -169,6 +173,8 @@ internal class InferenceEngineImpl private constructor(
                 }
                 Log.i(TAG, "Model loaded!")
                 _readyForSystemPrompt = true
+
+                _cancelGeneration = false
                 _state.value = InferenceEngine.State.ModelReady
             } catch (e: Exception) {
                 Log.e(TAG, (e.message ?: "Error loading model") + "\n" + pathToModel, e)
@@ -231,15 +237,19 @@ internal class InferenceEngineImpl private constructor(
 
             Log.i(TAG, "User prompt processed. Generating assistant prompt...")
             _state.value = InferenceEngine.State.Generating
-            while (true) {
+            while (!_cancelGeneration) {
                 generateNextToken()?.let { utf8token ->
                     if (utf8token.isNotEmpty()) emit(utf8token)
                 } ?: break
             }
-            Log.i(TAG, "Assistant generation complete. Awaiting user prompt...")
+            if (_cancelGeneration) {
+                Log.i(TAG, "Assistant generation aborted as requested.")
+            } else {
+                Log.i(TAG, "Assistant generation complete. Awaiting user prompt...")
+            }
             _state.value = InferenceEngine.State.ModelReady
         } catch (e: CancellationException) {
-            Log.i(TAG, "Generation cancelled by user.")
+            Log.i(TAG, "Assistant generation's flow collection cancelled.")
             _state.value = InferenceEngine.State.ModelReady
             throw e
         } catch (e: Exception) {
@@ -268,8 +278,9 @@ internal class InferenceEngineImpl private constructor(
     /**
      * Unloads the model and frees resources, or reset error states
      */
-    override suspend fun cleanUp() =
-        withContext(llamaDispatcher) {
+    override fun cleanUp() {
+        _cancelGeneration = true
+        runBlocking(llamaDispatcher) {
             when (val state = _state.value) {
                 is InferenceEngine.State.ModelReady -> {
                     Log.i(TAG, "Unloading model and free resources...")
@@ -293,17 +304,21 @@ internal class InferenceEngineImpl private constructor(
                 else -> throw IllegalStateException("Cannot unload model in ${state.javaClass.simpleName}")
             }
         }
+    }
 
     /**
      * Cancel all ongoing coroutines and free GGML backends
      */
     override fun destroy() {
-        _readyForSystemPrompt = false
-        llamaScope.cancel()
-        when(_state.value) {
-            is InferenceEngine.State.Uninitialized -> {}
-            is InferenceEngine.State.Initialized -> shutdown()
-            else -> { unload(); shutdown() }
+        _cancelGeneration = true
+        runBlocking(llamaDispatcher) {
+            _readyForSystemPrompt = false
+            when(_state.value) {
+                is InferenceEngine.State.Uninitialized -> {}
+                is InferenceEngine.State.Initialized -> shutdown()
+                else -> { unload(); shutdown() }
+            }
         }
+        llamaScope.cancel()
     }
 }
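The `@Volatile` flag plus `runBlocking(llamaDispatcher)` is the thread-safety fix the commit title refers to: a lifecycle thread flips the flag (immediately visible to the generation loop thanks to `@Volatile`), then blocks on the single-threaded llama dispatcher, so teardown proceeds only after the in-flight loop has observed the flag and returned. A reduced sketch of the pattern with illustrative names, assuming a single-threaded executor dispatcher like the one this engine uses:

```kotlin
import kotlinx.coroutines.*
import java.util.concurrent.Executors

class Worker {
    // All engine work is serialized onto this one thread.
    private val dispatcher = Executors.newSingleThreadExecutor().asCoroutineDispatcher()
    private val scope = CoroutineScope(dispatcher)

    @Volatile
    private var cancelled = false  // written by the UI thread, read by the worker thread

    fun startLoop() = scope.launch {
        while (!cancelled) {
            // produce one token per iteration; the flag is re-checked every pass
        }
    }

    // Safe to call from any thread, e.g. Activity.onDestroy():
    fun destroy() {
        cancelled = true                 // loop exits at its next check
        runBlocking(dispatcher) {
            // Queued on the same single thread behind the loop, so this block
            // runs only after the loop has finished; teardown is now safe.
        }
        scope.cancel()
    }
}
```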