2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/android/binder_alloc.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* binder_alloc.c
  *
  * Android IPC Subsystem
  *
  * Copyright (C) 2007-2017 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -31,8 +22,10 @@
 #include <asm/cacheflush.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
+#include <linux/sizes.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
+#include <trace/hooks/binder.h>
 
 struct list_lru binder_alloc_lru;
 
@@ -165,7 +158,7 @@
 }
 
 /**
- * binder_alloc_buffer_lookup() - get buffer given user ptr
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
  * @alloc: binder_alloc for this proc
  * @user_ptr: User pointer to buffer data
  *
@@ -220,7 +213,7 @@
         mm = alloc->vma_vm_mm;
 
         if (mm) {
-                down_read(&mm->mmap_sem);
+                mmap_write_lock(mm);
                 vma = alloc->vma;
         }
 
@@ -276,10 +269,9 @@
                 alloc->pages_high = index + 1;
 
                 trace_binder_alloc_page_end(alloc, index);
-                /* vm_insert_page does not seem to increment the refcount */
         }
         if (mm) {
-                up_read(&mm->mmap_sem);
+                mmap_write_unlock(mm);
                 mmput(mm);
         }
         return 0;
@@ -312,7 +304,7 @@
         }
 err_no_vma:
         if (mm) {
-                up_read(&mm->mmap_sem);
+                mmap_write_unlock(mm);
                 mmput(mm);
         }
         return vma ? -ENOMEM : -ESRCH;
@@ -347,7 +339,7 @@
         return vma;
 }
 
-static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 {
         /*
          * Find the amount and size of buffers allocated by the current caller;
356348 * and at some point we'll catch them in the act. This is more efficient
357349 * than keeping a map per pid.
358350 */
359
- struct rb_node *n = alloc->free_buffers.rb_node;
351
+ struct rb_node *n;
360352 struct binder_buffer *buffer;
361353 size_t total_alloc_size = 0;
362354 size_t num_buffers = 0;
....@@ -375,13 +367,19 @@
375367
376368 /*
377369 * Warn if this pid has more than 50 transactions, or more than 50% of
378
- * async space (which is 25% of total buffer size).
370
+ * async space (which is 25% of total buffer size). Oneway spam is only
371
+ * detected when the threshold is exceeded.
379372 */
380373 if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
381374 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
382375 "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
383376 alloc->pid, pid, num_buffers, total_alloc_size);
377
+ if (!alloc->oneway_spam_detected) {
378
+ alloc->oneway_spam_detected = true;
379
+ return true;
380
+ }
384381 }
382
+ return false;
385383 }
386384
387385 static struct binder_buffer *binder_alloc_new_buf_locked(
....@@ -424,6 +422,7 @@
424422 alloc->pid, extra_buffers_size);
425423 return ERR_PTR(-EINVAL);
426424 }
425
+ trace_android_vh_binder_alloc_new_buf_locked(size, alloc, is_async);
427426 if (is_async &&
428427 alloc->free_async_space < size + sizeof(struct binder_buffer)) {
429428 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -534,6 +533,7 @@
         buffer->async_transaction = is_async;
         buffer->extra_buffers_size = extra_buffers_size;
         buffer->pid = pid;
+        buffer->oneway_spam_suspect = false;
         if (is_async) {
                 alloc->free_async_space -= size + sizeof(struct binder_buffer);
                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
@@ -545,7 +545,9 @@
                          * of async space left (which is less than 10% of total
                          * buffer size).
                          */
-                        debug_low_async_space_locked(alloc, pid);
+                        buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
+                } else {
+                        alloc->oneway_spam_detected = false;
                 }
         }
         return buffer;
@@ -605,6 +607,7 @@
 {
         struct binder_buffer *prev, *next = NULL;
         bool to_free = true;
+
         BUG_ON(alloc->buffers.next == &buffer->entry);
         prev = binder_buffer_prev(buffer);
         BUG_ON(!prev->free);
@@ -704,16 +707,30 @@
                 binder_insert_free_buffer(alloc, buffer);
 }
 
+static void binder_alloc_clear_buf(struct binder_alloc *alloc,
+                                   struct binder_buffer *buffer);
 /**
  * binder_alloc_free_buf() - free a binder buffer
  * @alloc: binder_alloc for this proc
  * @buffer: kernel pointer to buffer
  *
- * Free the buffer allocated via binder_alloc_new_buffer()
+ * Free the buffer allocated via binder_alloc_new_buf()
  */
 void binder_alloc_free_buf(struct binder_alloc *alloc,
                            struct binder_buffer *buffer)
 {
+        /*
+         * We could eliminate the call to binder_alloc_clear_buf()
+         * from binder_alloc_deferred_release() by moving this to
+         * binder_alloc_free_buf_locked(). However, that could
+         * increase contention for the alloc mutex if clear_on_free
+         * is used frequently for large buffers. The mutex is not
+         * needed for correctness here.
+         */
+        if (buffer->clear_on_free) {
+                binder_alloc_clear_buf(alloc, buffer);
+                buffer->clear_on_free = false;
+        }
         mutex_lock(&alloc->mutex);
         binder_free_buf_locked(alloc, buffer);
         mutex_unlock(&alloc->mutex);
@@ -740,16 +757,18 @@
         struct binder_buffer *buffer;
 
         mutex_lock(&binder_alloc_mmap_lock);
-        if (alloc->buffer) {
+        if (alloc->buffer_size) {
                 ret = -EBUSY;
                 failure_string = "already mapped";
                 goto err_already_mapped;
         }
-
-        alloc->buffer = (void __user *)vma->vm_start;
+        alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+                                   SZ_4M);
         mutex_unlock(&binder_alloc_mmap_lock);
 
-        alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
+        alloc->buffer = (void __user *)vma->vm_start;
+
+        alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                                sizeof(alloc->pages[0]),
                                GFP_KERNEL);
         if (alloc->pages == NULL) {
@@ -757,7 +776,6 @@
                 failure_string = "alloc page array";
                 goto err_alloc_pages_failed;
         }
-        alloc->buffer_size = vma->vm_end - vma->vm_start;
 
         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
         if (!buffer) {
@@ -780,8 +798,9 @@
         kfree(alloc->pages);
         alloc->pages = NULL;
 err_alloc_pages_failed:
-        mutex_lock(&binder_alloc_mmap_lock);
         alloc->buffer = NULL;
+        mutex_lock(&binder_alloc_mmap_lock);
+        alloc->buffer_size = 0;
 err_already_mapped:
         mutex_unlock(&binder_alloc_mmap_lock);
         binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
@@ -808,6 +827,10 @@
                 /* Transaction should already have been freed */
                 BUG_ON(buffer->transaction);
 
+                if (buffer->clear_on_free) {
+                        binder_alloc_clear_buf(alloc, buffer);
+                        buffer->clear_on_free = false;
+                }
                 binder_free_buf_locked(alloc, buffer);
                 buffers++;
         }
@@ -964,6 +987,7 @@
                                     struct list_lru_one *lru,
                                     spinlock_t *lock,
                                     void *cb_arg)
+        __must_hold(lock)
 {
         struct mm_struct *mm = NULL;
         struct binder_lru_page *page = container_of(item,
@@ -987,8 +1011,8 @@
         mm = alloc->vma_vm_mm;
         if (!mmget_not_zero(mm))
                 goto err_mmget;
-        if (!down_read_trylock(&mm->mmap_sem))
-                goto err_down_read_mmap_sem_failed;
+        if (!mmap_read_trylock(mm))
+                goto err_mmap_read_lock_failed;
         vma = binder_alloc_get_vma(alloc);
 
         list_lru_isolate(lru, item);
@@ -1001,7 +1025,7 @@
 
                 trace_binder_unmap_user_end(alloc, index);
         }
-        up_read(&mm->mmap_sem);
+        mmap_read_unlock(mm);
         mmput_async(mm);
 
         trace_binder_unmap_kernel_start(alloc, index);
@@ -1015,7 +1039,7 @@
         mutex_unlock(&alloc->mutex);
         return LRU_REMOVED_RETRY;
 
-err_down_read_mmap_sem_failed:
+err_mmap_read_lock_failed:
         mmput_async(mm);
 err_mmget:
 err_page_already_freed:
@@ -1071,6 +1095,12 @@
                 list_lru_destroy(&binder_alloc_lru);
         }
         return ret;
+}
+
+void binder_alloc_shrinker_exit(void)
+{
+        unregister_shrinker(&binder_shrinker);
+        list_lru_destroy(&binder_alloc_lru);
 }
 
 /**
@@ -1141,6 +1171,36 @@
 }
 
 /**
+ * binder_alloc_clear_buf() - zero out buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be cleared
+ *
+ * memset the given buffer to 0
+ */
+static void binder_alloc_clear_buf(struct binder_alloc *alloc,
+                                   struct binder_buffer *buffer)
+{
+        size_t bytes = binder_alloc_buffer_size(alloc, buffer);
+        binder_size_t buffer_offset = 0;
+
+        while (bytes) {
+                unsigned long size;
+                struct page *page;
+                pgoff_t pgoff;
+                void *kptr;
+
+                page = binder_alloc_get_page(alloc, buffer,
+                                             buffer_offset, &pgoff);
+                size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+                kptr = kmap(page) + pgoff;
+                memset(kptr, 0, size);
+                kunmap(page);
+                bytes -= size;
+                buffer_offset += size;
+        }
+}
+
+/**
  * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
  * @alloc: binder_alloc for this proc
  * @buffer: binder buffer to be accessed
@@ -1184,15 +1244,16 @@
         return 0;
 }
 
-static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
-                                        bool to_buffer,
-                                        struct binder_buffer *buffer,
-                                        binder_size_t buffer_offset,
-                                        void *ptr,
-                                        size_t bytes)
+static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
+                                       bool to_buffer,
+                                       struct binder_buffer *buffer,
+                                       binder_size_t buffer_offset,
+                                       void *ptr,
+                                       size_t bytes)
 {
         /* All copies must be 32-bit aligned and 32-bit size */
-        BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));
+        if (!check_buffer(alloc, buffer, buffer_offset, bytes))
+                return -EINVAL;
 
         while (bytes) {
                 unsigned long size;
12201281 ptr = ptr + size;
12211282 buffer_offset += size;
12221283 }
1284
+ return 0;
12231285 }
12241286
1225
-void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
1226
- struct binder_buffer *buffer,
1227
- binder_size_t buffer_offset,
1228
- void *src,
1229
- size_t bytes)
1287
+int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
1288
+ struct binder_buffer *buffer,
1289
+ binder_size_t buffer_offset,
1290
+ void *src,
1291
+ size_t bytes)
12301292 {
1231
- binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
1232
- src, bytes);
1293
+ return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
1294
+ src, bytes);
12331295 }
12341296
1235
-void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
1236
- void *dest,
1237
- struct binder_buffer *buffer,
1238
- binder_size_t buffer_offset,
1239
- size_t bytes)
1297
+int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
1298
+ void *dest,
1299
+ struct binder_buffer *buffer,
1300
+ binder_size_t buffer_offset,
1301
+ size_t bytes)
12401302 {
1241
- binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
1242
- dest, bytes);
1303
+ return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
1304
+ dest, bytes);
12431305 }
12441306