Lines Matching refs:alloc
66 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc, in binder_alloc_buffer_size() argument
69 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
70 return (u8 *)alloc->buffer + in binder_alloc_buffer_size()
71 alloc->buffer_size - (u8 *)buffer->data; in binder_alloc_buffer_size()
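These fragments match the Android binder allocator (binder_alloc.c, in kernels of roughly the v4.19-v5.3 era, before buffer->data became buffer->user_data). binder_alloc_buffer_size() stores no per-buffer length: a buffer's size is the gap between its own data pointer and the next buffer's, with the last buffer running to the end of the mapped region. A minimal userspace sketch of that derived-size idea, with hypothetical names:

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical analogue: buffers carve up one contiguous region and
     * record only where they start; their size is derived on demand. */
    struct buf {
        unsigned char *data;
        struct buf *next;              /* NULL marks the last buffer */
    };

    static size_t buf_size(unsigned char *region, size_t region_size,
                           struct buf *b)
    {
        if (!b->next)                  /* last buffer: runs to region end */
            return region + region_size - b->data;
        return b->next->data - b->data;
    }

    int main(void)
    {
        unsigned char region[4096];
        struct buf b2 = { region + 3000, NULL };
        struct buf b1 = { region + 1024, &b2 };

        printf("%zu %zu\n", buf_size(region, sizeof(region), &b1),
               buf_size(region, sizeof(region), &b2));  /* 1976 1096 */
        return 0;
    }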
75 static void binder_insert_free_buffer(struct binder_alloc *alloc, in binder_insert_free_buffer() argument
78 struct rb_node **p = &alloc->free_buffers.rb_node; in binder_insert_free_buffer()
86 new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); in binder_insert_free_buffer()
90 alloc->pid, new_buffer_size, new_buffer); in binder_insert_free_buffer()
97 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
105 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); in binder_insert_free_buffer()
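binder_insert_free_buffer() walks the free_buffers red-black tree keyed by that derived size, then links and rebalances with rb_link_node()/rb_insert_color(). Outside the kernel there is no <linux/rbtree.h>; a plain (unbalanced) binary search tree shows the same ordering walk, rebalancing omitted:

    #include <stddef.h>
    #include <stdio.h>

    struct free_node {
        size_t size;
        struct free_node *left, *right;
    };

    /* Insert keyed by size; equal sizes go right, mirroring the
     * "new_buffer_size < buffer_size ? left : right" walk above. */
    static void insert_free(struct free_node **root, struct free_node *n)
    {
        while (*root)
            root = n->size < (*root)->size ? &(*root)->left
                                           : &(*root)->right;
        n->left = n->right = NULL;
        *root = n;
    }

    int main(void)
    {
        struct free_node a = { 128 }, b = { 64 }, c = { 256 }, *root = NULL;

        insert_free(&root, &a);
        insert_free(&root, &b);
        insert_free(&root, &c);
        printf("root=%zu left=%zu right=%zu\n",
               root->size, root->left->size, root->right->size);
        return 0;
    }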
109 struct binder_alloc *alloc, struct binder_buffer *new_buffer) in binder_insert_allocated_buffer_locked() argument
111 struct rb_node **p = &alloc->allocated_buffers.rb_node; in binder_insert_allocated_buffer_locked()
130 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); in binder_insert_allocated_buffer_locked()
134 struct binder_alloc *alloc, in binder_alloc_prepare_to_free_locked() argument
137 struct rb_node *n = alloc->allocated_buffers.rb_node; in binder_alloc_prepare_to_free_locked()
141 kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset); in binder_alloc_prepare_to_free_locked()
159 alloc->pid, current->pid, in binder_alloc_prepare_to_free_locked()
181 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, in binder_alloc_prepare_to_free() argument
186 mutex_lock(&alloc->mutex); in binder_alloc_prepare_to_free()
187 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); in binder_alloc_prepare_to_free()
188 mutex_unlock(&alloc->mutex); in binder_alloc_prepare_to_free()
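binder_alloc_prepare_to_free() converts the pointer received from userspace back to its kernel counterpart with a single fixed offset (user_buffer_offset, established at mmap time as vm_start minus the kernel buffer base, per lines 718-719 below), then searches allocated_buffers under the mutex. The offset arithmetic in isolation, with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical addresses standing in for the real mapping. */
        uintptr_t kern_base = 0xffff000010000000UL;  /* alloc->buffer */
        uintptr_t user_base = 0x0000007f80000000UL;  /* vma->vm_start */
        uintptr_t off = user_base - kern_base;  /* user_buffer_offset */

        uintptr_t user_ptr = user_base + 0x340; /* pointer from userspace */
        uintptr_t kern_ptr = user_ptr - off;    /* back to the kernel view */

        printf("kern_ptr = kern_base + 0x%lx\n",
               (unsigned long)(kern_ptr - kern_base));  /* 0x340 */
        return 0;
    }

Unsigned wraparound keeps the subtraction well defined even when the user base is numerically below the kernel base.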
192 static int binder_update_page_range(struct binder_alloc *alloc, int allocate, in binder_update_page_range() argument
203 "%d: %s pages %pK-%pK\n", alloc->pid, in binder_update_page_range()
209 trace_binder_update_page_range(alloc, allocate, start, end); in binder_update_page_range()
215 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; in binder_update_page_range()
222 if (need_mm && mmget_not_zero(alloc->vma_vm_mm)) in binder_update_page_range()
223 mm = alloc->vma_vm_mm; in binder_update_page_range()
227 vma = alloc->vma; in binder_update_page_range()
233 alloc->pid); in binder_update_page_range()
242 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
243 page = &alloc->pages[index]; in binder_update_page_range()
246 trace_binder_alloc_lru_start(alloc, index); in binder_update_page_range()
251 trace_binder_alloc_lru_end(alloc, index); in binder_update_page_range()
258 trace_binder_alloc_page_start(alloc, index); in binder_update_page_range()
264 alloc->pid, page_addr); in binder_update_page_range()
267 page->alloc = alloc; in binder_update_page_range()
277 alloc->pid, page_addr); in binder_update_page_range()
281 (uintptr_t)page_addr + alloc->user_buffer_offset; in binder_update_page_range()
285 alloc->pid, user_page_addr); in binder_update_page_range()
289 if (index + 1 > alloc->pages_high) in binder_update_page_range()
290 alloc->pages_high = index + 1; in binder_update_page_range()
292 trace_binder_alloc_page_end(alloc, index); in binder_update_page_range()
307 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
308 page = &alloc->pages[index]; in binder_update_page_range()
310 trace_binder_free_lru_start(alloc, index); in binder_update_page_range()
315 trace_binder_free_lru_end(alloc, index); in binder_update_page_range()
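binder_update_page_range() allocates or frees the physical pages backing an address range, locating each page's bookkeeping slot as (page_addr - alloc->buffer) / PAGE_SIZE. The indexing loop on its own, as a sketch:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Print the page indices backing [start, end) of a region, using the
     * same "(page_addr - buffer) / PAGE_SIZE" indexing as the listing. */
    static void walk_pages(char *buffer, char *start, char *end)
    {
        char *page_addr;

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE)
            printf("index %zu\n",
                   (size_t)(page_addr - buffer) / PAGE_SIZE);
    }

    int main(void)
    {
        static char region[8 * 4096];

        walk_pages(region, region + 4096, region + 3 * 4096);  /* 1, 2 */
        return 0;
    }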
336 static inline void binder_alloc_set_vma(struct binder_alloc *alloc, in binder_alloc_set_vma() argument
340 alloc->vma_vm_mm = vma->vm_mm; in binder_alloc_set_vma()
348 alloc->vma = vma; in binder_alloc_set_vma()
352 struct binder_alloc *alloc) in binder_alloc_get_vma() argument
356 if (alloc->vma) { in binder_alloc_get_vma()
359 vma = alloc->vma; in binder_alloc_get_vma()
365 struct binder_alloc *alloc, in binder_alloc_new_buf_locked() argument
371 struct rb_node *n = alloc->free_buffers.rb_node; in binder_alloc_new_buf_locked()
380 if (!binder_alloc_get_vma(alloc)) { in binder_alloc_new_buf_locked()
383 alloc->pid); in binder_alloc_new_buf_locked()
393 alloc->pid, data_size, offsets_size); in binder_alloc_new_buf_locked()
400 alloc->pid, extra_buffers_size); in binder_alloc_new_buf_locked()
404 alloc->free_async_space < size + sizeof(struct binder_buffer)) { in binder_alloc_new_buf_locked()
407 alloc->pid, size); in binder_alloc_new_buf_locked()
417 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
437 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in binder_alloc_new_buf_locked()
440 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
446 for (n = rb_first(&alloc->free_buffers); n != NULL; in binder_alloc_new_buf_locked()
449 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
457 alloc->pid, size); in binder_alloc_new_buf_locked()
467 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
472 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
481 ret = binder_update_page_range(alloc, 1, in binder_alloc_new_buf_locked()
492 __func__, alloc->pid); in binder_alloc_new_buf_locked()
498 binder_insert_free_buffer(alloc, new_buffer); in binder_alloc_new_buf_locked()
501 rb_erase(best_fit, &alloc->free_buffers); in binder_alloc_new_buf_locked()
504 binder_insert_allocated_buffer_locked(alloc, buffer); in binder_alloc_new_buf_locked()
507 alloc->pid, size, buffer); in binder_alloc_new_buf_locked()
513 alloc->free_async_space -= size + sizeof(struct binder_buffer); in binder_alloc_new_buf_locked()
516 alloc->pid, size, alloc->free_async_space); in binder_alloc_new_buf_locked()
521 binder_update_page_range(alloc, 0, in binder_alloc_new_buf_locked()
542 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, in binder_alloc_new_buf() argument
550 mutex_lock(&alloc->mutex); in binder_alloc_new_buf()
551 buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, in binder_alloc_new_buf()
553 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
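binder_alloc_new_buf_locked() performs a best-fit search: descend the size-ordered free tree, remember the smallest buffer that still fits, and stop early on an exact match. The same walk over the toy BST sketched earlier:

    #include <stddef.h>
    #include <stdio.h>

    struct free_node {
        size_t size;
        struct free_node *left, *right;
    };

    /* Best fit: smallest free block with node->size >= want, or NULL. */
    static struct free_node *best_fit(struct free_node *n, size_t want)
    {
        struct free_node *best = NULL;

        while (n) {
            if (want <= n->size) {     /* fits: remember, try smaller */
                best = n;
                if (want == n->size)
                    break;
                n = n->left;
            } else {                   /* too small: only larger can fit */
                n = n->right;
            }
        }
        return best;
    }

    int main(void)
    {
        struct free_node s = { 64 }, l = { 256 };
        struct free_node root = { 128, &s, &l };

        printf("%zu\n", best_fit(&root, 100)->size);  /* 128 */
        return 0;
    }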
567 static void binder_delete_free_buffer(struct binder_alloc *alloc, in binder_delete_free_buffer() argument
572 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
579 alloc->pid, buffer->data, prev->data); in binder_delete_free_buffer()
582 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
588 alloc->pid, in binder_delete_free_buffer()
597 alloc->pid, buffer->data); in binder_delete_free_buffer()
604 alloc->pid, buffer->data, in binder_delete_free_buffer()
606 binder_update_page_range(alloc, 0, buffer_start_page(buffer), in binder_delete_free_buffer()
613 static void binder_free_buf_locked(struct binder_alloc *alloc, in binder_free_buf_locked() argument
618 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_free_buf_locked()
626 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
631 BUG_ON(buffer->data < alloc->buffer); in binder_free_buf_locked()
632 BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size); in binder_free_buf_locked()
635 alloc->free_async_space += size + sizeof(struct binder_buffer); in binder_free_buf_locked()
639 alloc->pid, size, alloc->free_async_space); in binder_free_buf_locked()
642 binder_update_page_range(alloc, 0, in binder_free_buf_locked()
646 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
648 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
652 rb_erase(&next->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
653 binder_delete_free_buffer(alloc, next); in binder_free_buf_locked()
656 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
660 binder_delete_free_buffer(alloc, buffer); in binder_free_buf_locked()
661 rb_erase(&prev->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
665 binder_insert_free_buffer(alloc, buffer); in binder_free_buf_locked()
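binder_free_buf_locked() coalesces: once the buffer is free, a free successor is deleted outright and a free predecessor absorbs the buffer itself, so adjacent free space never stays fragmented. A linked-list-only sketch of that merge logic (the rbtree bookkeeping is dropped):

    #include <stdio.h>

    /* Doubly linked list of buffers carving one region; merging means
     * unlinking a neighbour so the survivor's extent covers both. */
    struct buf {
        int free;
        struct buf *prev, *next;
    };

    static void unlink_buf(struct buf *b)
    {
        if (b->prev)
            b->prev->next = b->next;
        if (b->next)
            b->next->prev = b->prev;
    }

    /* Mark 'b' free, merge with free neighbours, return the survivor. */
    static struct buf *free_buf(struct buf *b)
    {
        b->free = 1;
        if (b->next && b->next->free)
            unlink_buf(b->next);       /* absorb following free buffer */
        if (b->prev && b->prev->free) {
            unlink_buf(b);             /* fold 'b' into its predecessor */
            b = b->prev;
        }
        return b;
    }

    int main(void)
    {
        struct buf a = {0}, b = {0}, c = {0};

        a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
        a.free = 1;
        printf("%d\n", free_buf(&b) == &a && a.next == &c);  /* 1 */
        return 0;
    }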
675 void binder_alloc_free_buf(struct binder_alloc *alloc, in binder_alloc_free_buf() argument
678 mutex_lock(&alloc->mutex); in binder_alloc_free_buf()
679 binder_free_buf_locked(alloc, buffer); in binder_alloc_free_buf()
680 mutex_unlock(&alloc->mutex); in binder_alloc_free_buf()
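binder_alloc_free_buf(), like binder_alloc_new_buf() and binder_alloc_prepare_to_free() above, is a mutex-taking wrapper around a *_locked() worker, the kernel's usual convention for separating locking from logic. The same pattern in portable C:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int counter;

    /* Worker: caller must hold 'lock' (the *_locked() convention). */
    static int counter_bump_locked(void)
    {
        return ++counter;
    }

    /* Public entry point: take the lock, delegate, release. */
    static int counter_bump(void)
    {
        int v;

        pthread_mutex_lock(&lock);
        v = counter_bump_locked();
        pthread_mutex_unlock(&lock);
        return v;
    }

    int main(void)
    {
        printf("%d\n", counter_bump());  /* 1 */
        return 0;
    }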
696 int binder_alloc_mmap_handler(struct binder_alloc *alloc, in binder_alloc_mmap_handler() argument
705 if (alloc->buffer) { in binder_alloc_mmap_handler()
717 alloc->buffer = area->addr; in binder_alloc_mmap_handler()
718 alloc->user_buffer_offset = in binder_alloc_mmap_handler()
719 vma->vm_start - (uintptr_t)alloc->buffer; in binder_alloc_mmap_handler()
725 (vma->vm_start ^ (uint32_t)alloc->buffer))) { in binder_alloc_mmap_handler()
727 __func__, alloc->pid, vma->vm_start, in binder_alloc_mmap_handler()
728 vma->vm_end, alloc->buffer); in binder_alloc_mmap_handler()
733 alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE, in binder_alloc_mmap_handler()
734 sizeof(alloc->pages[0]), in binder_alloc_mmap_handler()
736 if (alloc->pages == NULL) { in binder_alloc_mmap_handler()
741 alloc->buffer_size = vma->vm_end - vma->vm_start; in binder_alloc_mmap_handler()
750 buffer->data = alloc->buffer; in binder_alloc_mmap_handler()
751 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
753 binder_insert_free_buffer(alloc, buffer); in binder_alloc_mmap_handler()
754 alloc->free_async_space = alloc->buffer_size / 2; in binder_alloc_mmap_handler()
755 binder_alloc_set_vma(alloc, vma); in binder_alloc_mmap_handler()
756 mmgrab(alloc->vma_vm_mm); in binder_alloc_mmap_handler()
761 kfree(alloc->pages); in binder_alloc_mmap_handler()
762 alloc->pages = NULL; in binder_alloc_mmap_handler()
765 vfree(alloc->buffer); in binder_alloc_mmap_handler()
766 alloc->buffer = NULL; in binder_alloc_mmap_handler()
772 alloc->pid, vma->vm_start, vma->vm_end, in binder_alloc_mmap_handler()
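binder_alloc_mmap_handler() sizes everything from the VMA: one pages[] slot per mapped page, buffer_size = vm_end - vm_start, and half the region reserved as free_async_space. The same arithmetic, assuming a hypothetical 1 MiB mapping:

    #include <stdlib.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long vm_start = 0x7f0000000000UL;
        unsigned long vm_end   = vm_start + 1024 * 1024;  /* 1 MiB */
        size_t buffer_size     = vm_end - vm_start;
        size_t npages          = buffer_size / PAGE_SIZE;
        size_t free_async      = buffer_size / 2;
        void **pages           = calloc(npages, sizeof(pages[0]));

        if (!pages)
            return 1;
        printf("pages=%zu async=%zu\n", npages, free_async);  /* 256 524288 */
        free(pages);
        return 0;
    }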
778 void binder_alloc_deferred_release(struct binder_alloc *alloc) in binder_alloc_deferred_release() argument
785 mutex_lock(&alloc->mutex); in binder_alloc_deferred_release()
786 BUG_ON(alloc->vma); in binder_alloc_deferred_release()
788 while ((n = rb_first(&alloc->allocated_buffers))) { in binder_alloc_deferred_release()
794 binder_free_buf_locked(alloc, buffer); in binder_alloc_deferred_release()
798 while (!list_empty(&alloc->buffers)) { in binder_alloc_deferred_release()
799 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
804 WARN_ON_ONCE(!list_empty(&alloc->buffers)); in binder_alloc_deferred_release()
809 if (alloc->pages) { in binder_alloc_deferred_release()
812 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_deferred_release()
816 if (!alloc->pages[i].page_ptr) in binder_alloc_deferred_release()
820 &alloc->pages[i].lru); in binder_alloc_deferred_release()
821 page_addr = alloc->buffer + i * PAGE_SIZE; in binder_alloc_deferred_release()
824 __func__, alloc->pid, i, page_addr, in binder_alloc_deferred_release()
827 __free_page(alloc->pages[i].page_ptr); in binder_alloc_deferred_release()
830 kfree(alloc->pages); in binder_alloc_deferred_release()
831 vfree(alloc->buffer); in binder_alloc_deferred_release()
833 mutex_unlock(&alloc->mutex); in binder_alloc_deferred_release()
834 if (alloc->vma_vm_mm) in binder_alloc_deferred_release()
835 mmdrop(alloc->vma_vm_mm); in binder_alloc_deferred_release()
839 __func__, alloc->pid, buffers, page_count); in binder_alloc_deferred_release()
861 struct binder_alloc *alloc) in binder_alloc_print_allocated() argument
865 mutex_lock(&alloc->mutex); in binder_alloc_print_allocated()
866 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_print_allocated()
869 mutex_unlock(&alloc->mutex); in binder_alloc_print_allocated()
878 struct binder_alloc *alloc) in binder_alloc_print_pages() argument
886 mutex_lock(&alloc->mutex); in binder_alloc_print_pages()
887 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_print_pages()
888 page = &alloc->pages[i]; in binder_alloc_print_pages()
896 mutex_unlock(&alloc->mutex); in binder_alloc_print_pages()
898 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); in binder_alloc_print_pages()
907 int binder_alloc_get_allocated_count(struct binder_alloc *alloc) in binder_alloc_get_allocated_count() argument
912 mutex_lock(&alloc->mutex); in binder_alloc_get_allocated_count()
913 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_get_allocated_count()
915 mutex_unlock(&alloc->mutex); in binder_alloc_get_allocated_count()
928 void binder_alloc_vma_close(struct binder_alloc *alloc) in binder_alloc_vma_close() argument
930 binder_alloc_set_vma(alloc, NULL); in binder_alloc_vma_close()
951 struct binder_alloc *alloc; in binder_alloc_free_page() local
956 alloc = page->alloc; in binder_alloc_free_page()
957 if (!mutex_trylock(&alloc->mutex)) in binder_alloc_free_page()
963 index = page - alloc->pages; in binder_alloc_free_page()
964 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; in binder_alloc_free_page()
965 vma = binder_alloc_get_vma(alloc); in binder_alloc_free_page()
967 if (!mmget_not_zero(alloc->vma_vm_mm)) in binder_alloc_free_page()
969 mm = alloc->vma_vm_mm; in binder_alloc_free_page()
978 trace_binder_unmap_user_start(alloc, index); in binder_alloc_free_page()
981 page_addr + alloc->user_buffer_offset, in binder_alloc_free_page()
984 trace_binder_unmap_user_end(alloc, index); in binder_alloc_free_page()
990 trace_binder_unmap_kernel_start(alloc, index); in binder_alloc_free_page()
996 trace_binder_unmap_kernel_end(alloc, index); in binder_alloc_free_page()
999 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1006 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
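binder_alloc_free_page() is the shrinker callback; it uses mutex_trylock() rather than mutex_lock() because blocking on the allocator mutex from reclaim could deadlock against an allocation path that holds it while triggering reclaim. The same non-blocking discipline sketched with POSIX threads:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Reclaim-style callback: never block on the allocator lock; if it
     * is busy, report nothing freed and let the caller move on. */
    static int try_reclaim(void)
    {
        if (pthread_mutex_trylock(&lock) != 0)
            return 0;                  /* busy: skip, avoid deadlock */
        /* ... free one page here ... */
        pthread_mutex_unlock(&lock);
        return 1;
    }

    int main(void)
    {
        pthread_mutex_lock(&lock);     /* simulate the allocator holding it */
        printf("reclaimed=%d\n", try_reclaim());  /* 0: skipped */
        pthread_mutex_unlock(&lock);
        printf("reclaimed=%d\n", try_reclaim());  /* 1 */
        return 0;
    }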
1041 void binder_alloc_init(struct binder_alloc *alloc) in binder_alloc_init() argument
1043 alloc->pid = current->group_leader->pid; in binder_alloc_init()
1044 mutex_init(&alloc->mutex); in binder_alloc_init()
1045 INIT_LIST_HEAD(&alloc->buffers); in binder_alloc_init()