Lines matching full:buffer in drivers/android/binder_alloc.c

50 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)  in binder_buffer_next()  argument
52 return list_entry(buffer->entry.next, struct binder_buffer, entry); in binder_buffer_next()
55 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer) in binder_buffer_prev() argument
57 return list_entry(buffer->entry.prev, struct binder_buffer, entry); in binder_buffer_prev()
61 struct binder_buffer *buffer) in binder_alloc_buffer_size() argument
63 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
64 return alloc->buffer + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
65 return binder_buffer_next(buffer)->user_data - buffer->user_data; in binder_alloc_buffer_size()
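binder_buffer stores no size field: a buffer's size is implicit, the distance from its user_data to the next buffer's user_data, or to the end of the mapped area for the last entry. A minimal userspace sketch of that scheme; struct buf and buf_size are illustrative names, not kernel API:

    #include <stddef.h>

    struct buf {
        unsigned char *start;   /* like binder_buffer::user_data */
        struct buf *next;       /* NULL for the last buffer */
    };

    /* The size is the gap to the next buffer, or to the end of the
     * mapped area for the last one -- no size field is stored. */
    static size_t buf_size(const struct buf *b,
                           const unsigned char *area, size_t area_size)
    {
        if (!b->next)
            return (size_t)(area + area_size - b->start);
        return (size_t)(b->next->start - b->start);
    }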
73 struct binder_buffer *buffer; in binder_insert_free_buffer() local
82 "%d: add free buffer, size %zd, at %pK\n", in binder_insert_free_buffer()
87 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_free_buffer()
88 BUG_ON(!buffer->free); in binder_insert_free_buffer()
90 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
106 struct binder_buffer *buffer; in binder_insert_allocated_buffer_locked() local
112 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_allocated_buffer_locked()
113 BUG_ON(buffer->free); in binder_insert_allocated_buffer_locked()
115 if (new_buffer->user_data < buffer->user_data) in binder_insert_allocated_buffer_locked()
117 else if (new_buffer->user_data > buffer->user_data) in binder_insert_allocated_buffer_locked()
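binder_insert_allocated_buffer_locked() keeps in-use buffers in an rb tree ordered by user_data, so a later free can find the buffer from its userspace address. A hedged sketch of the same ordering rule on a plain binary search tree; the node layout is illustrative, the kernel really uses struct rb_node with rb_link_node()/rb_insert_color():

    #include <stddef.h>

    struct node {
        unsigned char *user_data;
        struct node *left, *right;
    };

    /* Descend by address, mirroring the < / > comparisons above;
     * equal addresses would mean a double insert and cannot happen. */
    static void insert_by_addr(struct node **link, struct node *n)
    {
        while (*link)
            link = n->user_data < (*link)->user_data ?
                   &(*link)->left : &(*link)->right;
        n->left = n->right = NULL;
        *link = n;
    }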
131 struct binder_buffer *buffer; in binder_alloc_prepare_to_free_locked() local
137 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_prepare_to_free_locked()
138 BUG_ON(buffer->free); in binder_alloc_prepare_to_free_locked()
140 if (uptr < buffer->user_data) in binder_alloc_prepare_to_free_locked()
142 else if (uptr > buffer->user_data) in binder_alloc_prepare_to_free_locked()
147 * free the buffer when in use by kernel or in binder_alloc_prepare_to_free_locked()
150 if (!buffer->allow_user_free) in binder_alloc_prepare_to_free_locked()
152 buffer->allow_user_free = 0; in binder_alloc_prepare_to_free_locked()
153 return buffer; in binder_alloc_prepare_to_free_locked()
160 * binder_alloc_prepare_to_free() - get buffer given user ptr
162 * @user_ptr: User pointer to buffer data
164 * Validate the userspace pointer to buffer data and return the buffer corresponding to
165 * that user pointer. Search the rb tree for the buffer that matches the user data
168 * Return: Pointer to buffer or NULL
173 struct binder_buffer *buffer; in binder_alloc_prepare_to_free() local
176 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); in binder_alloc_prepare_to_free()
178 return buffer; in binder_alloc_prepare_to_free()
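binder_alloc_prepare_to_free() is a lock-taking wrapper around the _locked variant, the pattern this file uses for every public entry point. A minimal sketch of the idiom with a pthread mutex; all names are illustrative and the lookup is stubbed out:

    #include <pthread.h>
    #include <stddef.h>

    struct buf;     /* stands in for struct binder_buffer */

    static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;

    static struct buf *prepare_to_free_locked(void *uptr)
    {
        (void)uptr;
        return NULL;    /* stub: the rb-tree lookup is omitted */
    }

    static struct buf *prepare_to_free(void *uptr)
    {
        struct buf *b;

        pthread_mutex_lock(&alloc_mutex);
        b = prepare_to_free_locked(uptr);   /* work under the lock */
        pthread_mutex_unlock(&alloc_mutex);
        return b;
    }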
204 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; in binder_update_page_range()
231 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
283 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
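All three sites compute a page index the same way: the byte offset of page_addr from the start of the mapping, divided by PAGE_SIZE. A worked example, assuming 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
        uint64_t base = 0x7f0000000000u;        /* like alloc->buffer */
        uint64_t page_addr = base + 3 * PAGE_SIZE;

        /* same arithmetic as the index computations above */
        printf("index = %llu\n",
               (unsigned long long)((page_addr - base) / PAGE_SIZE)); /* 3 */
        return 0;
    }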
319 * If we see alloc->vma is not NULL, buffer data structures set up in binder_alloc_set_vma()
351 struct binder_buffer *buffer; in debug_low_async_space_locked() local
357 buffer = rb_entry(n, struct binder_buffer, rb_node); in debug_low_async_space_locked()
358 if (buffer->pid != pid) in debug_low_async_space_locked()
360 if (!buffer->async_transaction) in debug_low_async_space_locked()
362 total_alloc_size += binder_alloc_buffer_size(alloc, buffer) in debug_low_async_space_locked()
369 * async space (which is 25% of total buffer size). Oneway spam is only in debug_low_async_space_locked()
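The helper sums one pid's outstanding async buffers and compares the total against the async budget, which the comment fixes at one quarter of the whole mapping. A hedged sketch of that arithmetic; the exact warning thresholds live in omitted lines, so the 50% cut-off below is an assumption, not visible code:

    #include <stdbool.h>
    #include <stddef.h>

    /* Async transactions get at most 1/4 of the mapped area. */
    static size_t async_budget(size_t buffer_size)
    {
        return buffer_size / 4;
    }

    /* Assumed threshold: flag a pid holding over half that budget. */
    static bool low_async_space(size_t pid_async_bytes, size_t buffer_size)
    {
        return pid_async_bytes > async_budget(buffer_size) / 2;
    }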
393 struct binder_buffer *buffer; in binder_alloc_new_buf_locked() local
436 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
437 BUG_ON(!buffer->free); in binder_alloc_new_buf_locked()
438 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
460 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
461 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
469 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
470 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
487 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
488 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
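Lines 436-488 are a best-fit walk of the size-ordered free tree: a node big enough becomes the current candidate and the walk descends left looking for something smaller; a node too small sends it right. A sketch of the same walk over a plain BST keyed by size (layout illustrative):

    #include <stddef.h>

    struct fnode {
        size_t size;                    /* free buffer's size */
        struct fnode *left, *right;
    };

    /* Best fit: smallest node with size >= want, or NULL if none. */
    static struct fnode *best_fit(struct fnode *root, size_t want)
    {
        struct fnode *best = NULL;

        while (root) {
            if (root->size >= want) {
                best = root;            /* fits; try to do better */
                root = root->left;
            } else {
                root = root->right;     /* too small; go bigger */
            }
        }
        return best;
    }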
492 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", in binder_alloc_new_buf_locked()
493 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
496 (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK); in binder_alloc_new_buf_locked()
499 (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); in binder_alloc_new_buf_locked()
503 PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); in binder_alloc_new_buf_locked()
510 new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); in binder_alloc_new_buf_locked()
512 pr_err("%s: %d failed to alloc new buffer struct\n", in binder_alloc_new_buf_locked()
516 new_buffer->user_data = (u8 __user *)buffer->user_data + size; in binder_alloc_new_buf_locked()
517 list_add(&new_buffer->entry, &buffer->entry); in binder_alloc_new_buf_locked()
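When the best fit is larger than requested, lines 510-517 carve off the tail: a new binder_buffer whose user_data begins size bytes in is linked immediately after the current entry, and the implicit-size rule from lines 63-65 then yields both lengths automatically. A sketch with the same illustrative struct buf layout as the earlier size example:

    #include <stdlib.h>

    struct buf { unsigned char *start; struct buf *next; };

    /* Keep `size` bytes in b; the remainder becomes a new free
     * buffer linked right after it (cf. the list_add() above). */
    static struct buf *split(struct buf *b, size_t size)
    {
        struct buf *rest = calloc(1, sizeof(*rest));

        if (!rest)
            return NULL;        /* no split; caller keeps it all */
        rest->start = b->start + size;
        rest->next = b->next;
        b->next = rest;
        return rest;
    }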
523 buffer->free = 0; in binder_alloc_new_buf_locked()
524 buffer->allow_user_free = 0; in binder_alloc_new_buf_locked()
525 binder_insert_allocated_buffer_locked(alloc, buffer); in binder_alloc_new_buf_locked()
528 alloc->pid, size, buffer); in binder_alloc_new_buf_locked()
529 buffer->data_size = data_size; in binder_alloc_new_buf_locked()
530 buffer->offsets_size = offsets_size; in binder_alloc_new_buf_locked()
531 buffer->async_transaction = is_async; in binder_alloc_new_buf_locked()
532 buffer->extra_buffers_size = extra_buffers_size; in binder_alloc_new_buf_locked()
533 buffer->pid = pid; in binder_alloc_new_buf_locked()
534 buffer->oneway_spam_suspect = false; in binder_alloc_new_buf_locked()
544 * buffer size). in binder_alloc_new_buf_locked()
546 buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid); in binder_alloc_new_buf_locked()
551 return buffer; in binder_alloc_new_buf_locked()
555 PAGE_ALIGN((uintptr_t)buffer->user_data), in binder_alloc_new_buf_locked()
561 * binder_alloc_new_buf() - Allocate a new binder buffer
563 * @data_size: size of user data buffer
564 * @offsets_size: user specified size of the buffer's offsets area
566 * @is_async: buffer for async transaction
569 * Allocate a new buffer given the requested sizes. Returns
570 * the kernel version of the buffer pointer. The size allocated
574 * Return: The allocated buffer or %NULL on error
583 struct binder_buffer *buffer; in binder_alloc_new_buf() local
586 buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, in binder_alloc_new_buf()
589 return buffer; in binder_alloc_new_buf()
592 static void __user *buffer_start_page(struct binder_buffer *buffer) in buffer_start_page() argument
594 return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK); in buffer_start_page()
597 static void __user *prev_buffer_end_page(struct binder_buffer *buffer) in prev_buffer_end_page() argument
600 (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK); in prev_buffer_end_page()
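These helpers drive the merge decisions below: masking with PAGE_MASK rounds an address down to its page, and the `- 1` in prev_buffer_end_page() selects the page holding the byte just before this buffer, which differs from buffer_start_page() exactly when user_data is page aligned. A worked example with 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PMASK (~(uint64_t)0xfff)    /* 4 KiB PAGE_MASK assumed */

    int main(void)
    {
        uint64_t ud = 0x2010;           /* buffer starts mid-page */
        printf("%#llx %#llx\n",         /* 0x2000 0x2000: shared page */
               (unsigned long long)(ud & PMASK),
               (unsigned long long)((ud - 1) & PMASK));

        ud = 0x2000;                    /* page-aligned start */
        printf("%#llx %#llx\n",         /* 0x2000 0x1000: own page */
               (unsigned long long)(ud & PMASK),
               (unsigned long long)((ud - 1) & PMASK));
        return 0;
    }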
604 struct binder_buffer *buffer) in binder_delete_free_buffer() argument
609 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
610 prev = binder_buffer_prev(buffer); in binder_delete_free_buffer()
612 if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) { in binder_delete_free_buffer()
615 "%d: merge free, buffer %pK share page with %pK\n", in binder_delete_free_buffer()
616 alloc->pid, buffer->user_data, in binder_delete_free_buffer()
620 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
621 next = binder_buffer_next(buffer); in binder_delete_free_buffer()
622 if (buffer_start_page(next) == buffer_start_page(buffer)) { in binder_delete_free_buffer()
625 "%d: merge free, buffer %pK share page with %pK\n", in binder_delete_free_buffer()
627 buffer->user_data, in binder_delete_free_buffer()
632 if (PAGE_ALIGNED(buffer->user_data)) { in binder_delete_free_buffer()
634 "%d: merge free, buffer start %pK is page aligned\n", in binder_delete_free_buffer()
635 alloc->pid, buffer->user_data); in binder_delete_free_buffer()
641 "%d: merge free, buffer %pK do not share page with %pK or %pK\n", in binder_delete_free_buffer()
642 alloc->pid, buffer->user_data, in binder_delete_free_buffer()
645 binder_update_page_range(alloc, 0, buffer_start_page(buffer), in binder_delete_free_buffer()
646 buffer_start_page(buffer) + PAGE_SIZE); in binder_delete_free_buffer()
648 list_del(&buffer->entry); in binder_delete_free_buffer()
649 kfree(buffer); in binder_delete_free_buffer()
653 struct binder_buffer *buffer) in binder_free_buf_locked() argument
657 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_free_buf_locked()
659 size = ALIGN(buffer->data_size, sizeof(void *)) + in binder_free_buf_locked()
660 ALIGN(buffer->offsets_size, sizeof(void *)) + in binder_free_buf_locked()
661 ALIGN(buffer->extra_buffers_size, sizeof(void *)); in binder_free_buf_locked()
665 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
667 BUG_ON(buffer->free); in binder_free_buf_locked()
669 BUG_ON(buffer->transaction != NULL); in binder_free_buf_locked()
670 BUG_ON(buffer->user_data < alloc->buffer); in binder_free_buf_locked()
671 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); in binder_free_buf_locked()
673 if (buffer->async_transaction) { in binder_free_buf_locked()
682 (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data), in binder_free_buf_locked()
684 buffer->user_data + buffer_size) & PAGE_MASK)); in binder_free_buf_locked()
686 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
687 buffer->free = 1; in binder_free_buf_locked()
688 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
689 struct binder_buffer *next = binder_buffer_next(buffer); in binder_free_buf_locked()
696 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
697 struct binder_buffer *prev = binder_buffer_prev(buffer); in binder_free_buf_locked()
700 binder_delete_free_buffer(alloc, buffer); in binder_free_buf_locked()
702 buffer = prev; in binder_free_buf_locked()
705 binder_insert_free_buffer(alloc, buffer); in binder_free_buf_locked()
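binder_free_buf_locked() coalesces in both directions: a free next entry is simply deleted, its space folding into this buffer under the implicit-size rule, and if the previous entry is free this buffer is deleted instead and the predecessor carries on to the free-tree insert at line 705. A hedged sketch of that merge on a doubly linked list, with plain deletion standing in for binder_delete_free_buffer():

    #include <stdbool.h>
    #include <stdlib.h>

    struct dbuf {
        unsigned char *start;
        bool free;
        struct dbuf *prev, *next;   /* doubly linked, like list_head */
    };

    static void unlink_free(struct dbuf *b)
    {
        if (b->prev)
            b->prev->next = b->next;
        if (b->next)
            b->next->prev = b->prev;
        free(b);
    }

    /* Mark b free and merge with free neighbours; returns the entry
     * the caller should insert into the free-size tree. */
    static struct dbuf *free_and_coalesce(struct dbuf *b)
    {
        b->free = true;
        if (b->next && b->next->free)
            unlink_free(b->next);   /* next folds into b */
        if (b->prev && b->prev->free) {
            struct dbuf *p = b->prev;
            unlink_free(b);         /* b folds into prev */
            b = p;
        }
        return b;
    }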
709 struct binder_buffer *buffer);
711 * binder_alloc_free_buf() - free a binder buffer
713 * @buffer: kernel pointer to buffer
715 * Free the buffer allocated via binder_alloc_new_buf()
718 struct binder_buffer *buffer) in binder_alloc_free_buf() argument
728 if (buffer->clear_on_free) { in binder_alloc_free_buf()
729 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_free_buf()
730 buffer->clear_on_free = false; in binder_alloc_free_buf()
733 binder_free_buf_locked(alloc, buffer); in binder_alloc_free_buf()
755 struct binder_buffer *buffer; in binder_alloc_mmap_handler() local
767 alloc->buffer = (void __user *)vma->vm_start; in binder_alloc_mmap_handler()
778 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); in binder_alloc_mmap_handler()
779 if (!buffer) { in binder_alloc_mmap_handler()
781 failure_string = "alloc buffer struct"; in binder_alloc_mmap_handler()
785 buffer->user_data = alloc->buffer; in binder_alloc_mmap_handler()
786 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
787 buffer->free = 1; in binder_alloc_mmap_handler()
788 binder_insert_free_buffer(alloc, buffer); in binder_alloc_mmap_handler()
799 alloc->buffer = NULL; in binder_alloc_mmap_handler()
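So after a successful mmap the allocator owns exactly one free binder_buffer whose user_data is alloc->buffer, spanning the whole mapping; every later allocation splits that region and every free merges the splits back. A minimal bootstrap sketch with the illustrative struct buf layout:

    #include <stdlib.h>

    struct buf { unsigned char *start; struct buf *next; };

    /* One free buffer covering the whole mapped area: with no next
     * entry, its implicit size is the entire region. */
    static struct buf *bootstrap(unsigned char *area)
    {
        struct buf *b = calloc(1, sizeof(*b));

        if (!b)
            return NULL;
        b->start = area;            /* == alloc->buffer */
        return b;                   /* b->next is already NULL */
    }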
816 struct binder_buffer *buffer; in binder_alloc_deferred_release() local
823 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_deferred_release()
826 BUG_ON(buffer->transaction); in binder_alloc_deferred_release()
828 if (buffer->clear_on_free) { in binder_alloc_deferred_release()
829 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_deferred_release()
830 buffer->clear_on_free = false; in binder_alloc_deferred_release()
832 binder_free_buf_locked(alloc, buffer); in binder_alloc_deferred_release()
837 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
839 WARN_ON(!buffer->free); in binder_alloc_deferred_release()
841 list_del(&buffer->entry); in binder_alloc_deferred_release()
843 kfree(buffer); in binder_alloc_deferred_release()
859 page_addr = alloc->buffer + i * PAGE_SIZE; in binder_alloc_deferred_release()
879 struct binder_buffer *buffer) in print_binder_buffer() argument
882 prefix, buffer->debug_id, buffer->user_data, in print_binder_buffer()
883 buffer->data_size, buffer->offsets_size, in print_binder_buffer()
884 buffer->extra_buffers_size, in print_binder_buffer()
885 buffer->transaction ? "active" : "delivered"); in print_binder_buffer()
889 * binder_alloc_print_allocated() - print buffer info
893 * Prints information about every buffer associated with
903 print_binder_buffer(m, " buffer", in binder_alloc_print_allocated()
1007 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; in binder_alloc_free_page()
1099 * check_buffer() - verify that buffer/offset is safe to access
1101 * @buffer: binder buffer to be accessed
1102 * @offset: offset into @buffer data
1106 * @buffer and that the buffer is currently active and not freeable.
1108 * allowed to touch the buffer in two cases:
1110 * 1) when the buffer is being created:
1111 * (buffer->free == 0 && buffer->allow_user_free == 0)
1112 * 2) when the buffer is being torn down:
1113 * (buffer->free == 0 && buffer->transaction == NULL).
1115 * Return: true if the buffer is safe to access
1118 struct binder_buffer *buffer, in check_buffer() argument
1121 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); in check_buffer()
1126 !buffer->free && in check_buffer()
1127 (!buffer->allow_user_free || !buffer->transaction); in check_buffer()
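The @offset/@bytes check sits in omitted lines between 1121 and 1126 (they use buffer_size, not the bare identifier buffer, so they do not match here). The safe shape for such a check never computes offset + bytes, which could wrap; a hedged userspace restatement:

    #include <stdbool.h>
    #include <stddef.h>

    /* Compare bytes against the size first, then offset against the
     * difference -- no addition that could overflow. */
    static bool range_ok(size_t buffer_size, size_t offset, size_t bytes)
    {
        return bytes <= buffer_size &&
               offset <= buffer_size - bytes;
    }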
1131 * binder_alloc_get_page() - get kernel pointer for given buffer offset
1133 * @buffer: binder buffer to be accessed
1134 * @buffer_offset: offset into @buffer data
1138 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
1142 * to a valid address within the @buffer and that @buffer is
1150 struct binder_buffer *buffer, in binder_alloc_get_page() argument
1155 (buffer->user_data - alloc->buffer); in binder_alloc_get_page()
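Line 1155 is the tail of the offset computation: the buffer's own offset inside the mapping plus @buffer_offset gives a byte offset into the whole area, and the page index and in-page offset fall out by division. A worked sketch, 4 KiB pages assumed:

    #include <stddef.h>

    #define PAGE_SIZE 4096u

    struct page_pos { size_t index; size_t pgoff; };

    static struct page_pos locate(const unsigned char *area_base,
                                  const unsigned char *user_data,
                                  size_t buffer_offset)
    {
        size_t off = (size_t)(user_data - area_base) + buffer_offset;
        struct page_pos p = {
            .index = off / PAGE_SIZE,   /* which alloc->pages[] entry */
            .pgoff = off % PAGE_SIZE,   /* offset inside that page */
        };
        return p;
    }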
1166 * binder_alloc_clear_buf() - zero out buffer
1168 * @buffer: binder buffer to be cleared
1170 * memset the given buffer to 0
1173 struct binder_buffer *buffer) in binder_alloc_clear_buf() argument
1175 size_t bytes = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_clear_buf()
1184 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_clear_buf()
1198 * @buffer: binder buffer to be accessed
1199 * @buffer_offset: offset into @buffer data
1200 * @from: userspace pointer to source buffer
1203 * Copy bytes from source userspace to target buffer.
1209 struct binder_buffer *buffer, in binder_alloc_copy_user_to_buffer() argument
1214 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_copy_user_to_buffer()
1224 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_copy_user_to_buffer()
1241 struct binder_buffer *buffer, in binder_alloc_do_buffer_copy() argument
1247 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_do_buffer_copy()
1257 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_do_buffer_copy()
1280 struct binder_buffer *buffer, in binder_alloc_copy_to_buffer() argument
1285 return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, in binder_alloc_copy_to_buffer()
1291 struct binder_buffer *buffer, in binder_alloc_copy_from_buffer() argument
1295 return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, in binder_alloc_copy_from_buffer()
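Both copy directions funnel into binder_alloc_do_buffer_copy(), which walks the range page by page: each pass resolves the current page via binder_alloc_get_page(), copies min(bytes, PAGE_SIZE - pgoff), and advances. A hedged userspace sketch of that chunking loop, with flat memory standing in for the per-page lookup:

    #include <stddef.h>
    #include <string.h>

    #define PAGE_SIZE 4096u

    /* Copy `bytes` in page-bounded chunks -- the same stride the
     * kernel loop uses so it never crosses a page boundary at once. */
    static void chunked_copy(unsigned char *dst, const unsigned char *src,
                             size_t offset, size_t bytes)
    {
        while (bytes) {
            size_t pgoff = offset % PAGE_SIZE;
            size_t n = bytes < PAGE_SIZE - pgoff ?
                       bytes : PAGE_SIZE - pgoff;

            memcpy(dst + offset, src + offset, n);
            bytes -= n;
            offset += n;
        }
    }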