| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-06 14:18:59 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-06 14:18:59 -0800 |
| commit | 45763bf4bc1ebdf8eb95697607e1fd042a3e1221 | |
| tree | c5b26c2d5d1190247b59d6d1fe68b8a247351362 /drivers/android/binder_alloc.c | |
| parent | da2577fe63f865cd9dc785a42c29c0071f567a35 | |
| parent | 142a0f83b216a607aebed42e54a1be620765e28c | |
Merge tag 'char-misc-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
"Here is the big char/misc driver patch pull request for 5.1-rc1.
The largest thing by far is the new habanalabs driver for their AI
accelerator chip. For now it is in the drivers/misc directory but will
probably move to a new directory soon along with other drivers of this
type.
Other than that, just the usual set of individual driver updates and
fixes. There's an "odd" merge in here from the DRM tree that they
asked me to do as the MEI driver is starting to interact with the i915
driver, and it needed some coordination. All of those patches have
been properly acked by the relevant subsystem maintainers.
All of these have been in linux-next with no reported issues, most for
quite some time"
* tag 'char-misc-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (219 commits)
habanalabs: adjust Kconfig to fix build errors
habanalabs: use %px instead of %p in error print
habanalabs: use do_div for 64-bit divisions
intel_th: gth: Fix an off-by-one in output unassigning
habanalabs: fix little-endian<->cpu conversion warnings
habanalabs: use NULL to initialize array of pointers
habanalabs: fix little-endian<->cpu conversion warnings
habanalabs: soft-reset device if context-switch fails
habanalabs: print pointer using %p
habanalabs: fix memory leak with CBs with unaligned size
habanalabs: return correct error code on MMU mapping failure
habanalabs: add comments in uapi/misc/habanalabs.h
habanalabs: extend QMAN0 job timeout
habanalabs: set DMA0 completion to SOB 1007
habanalabs: fix validation of WREG32 to DMA completion
habanalabs: fix mmu cache registers init
habanalabs: disable CPU access on timeouts
habanalabs: add MMU DRAM default page mapping
habanalabs: Dissociate RAZWI info from event types
misc/habanalabs: adjust Kconfig to fix build errors
...
Diffstat (limited to 'drivers/android/binder_alloc.c')
| -rw-r--r-- | drivers/android/binder_alloc.c | 303 |
|---|---|---|
1 file changed, 219 insertions, 84 deletions
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 022cd80e80cc..6389467670a0 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -29,6 +29,8 @@
 #include <linux/list_lru.h>
 #include <linux/ratelimit.h>
 #include <asm/cacheflush.h>
+#include <linux/uaccess.h>
+#include <linux/highmem.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
@@ -67,9 +69,8 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
 				       struct binder_buffer *buffer)
 {
 	if (list_is_last(&buffer->entry, &alloc->buffers))
-		return (u8 *)alloc->buffer +
-			alloc->buffer_size - (u8 *)buffer->data;
-	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
+		return alloc->buffer + alloc->buffer_size - buffer->user_data;
+	return binder_buffer_next(buffer)->user_data - buffer->user_data;
 }
 
 static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -119,9 +120,9 @@ static void binder_insert_allocated_buffer_locked(
 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (new_buffer->data < buffer->data)
+		if (new_buffer->user_data < buffer->user_data)
 			p = &parent->rb_left;
-		else if (new_buffer->data > buffer->data)
+		else if (new_buffer->user_data > buffer->user_data)
 			p = &parent->rb_right;
 		else
 			BUG();
@@ -136,17 +137,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 {
 	struct rb_node *n = alloc->allocated_buffers.rb_node;
 	struct binder_buffer *buffer;
-	void *kern_ptr;
+	void __user *uptr;
 
-	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
+	uptr = (void __user *)user_ptr;
 
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (kern_ptr < buffer->data)
+		if (uptr < buffer->user_data)
 			n = n->rb_left;
-		else if (kern_ptr > buffer->data)
+		else if (uptr > buffer->user_data)
 			n = n->rb_right;
 		else {
 			/*
@@ -186,9 +187,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 }
 
 static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
-				    void *start, void *end)
+				    void __user *start, void __user *end)
 {
-	void *page_addr;
+	void __user *page_addr;
 	unsigned long user_page_addr;
 	struct binder_lru_page *page;
 	struct vm_area_struct *vma = NULL;
@@ -263,18 +264,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		page->alloc = alloc;
 		INIT_LIST_HEAD(&page->lru);
 
-		ret = map_kernel_range_noflush((unsigned long)page_addr,
-					       PAGE_SIZE, PAGE_KERNEL,
-					       &page->page_ptr);
-		flush_cache_vmap((unsigned long)page_addr,
-				(unsigned long)page_addr + PAGE_SIZE);
-		if (ret != 1) {
-			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
-			       alloc->pid, page_addr);
-			goto err_map_kernel_failed;
-		}
-		user_page_addr =
-			(uintptr_t)page_addr + alloc->user_buffer_offset;
+		user_page_addr = (uintptr_t)page_addr;
 		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
 		if (ret) {
 			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
@@ -312,8 +302,6 @@ free_range:
 		continue;
 
 err_vm_insert_page_failed:
-		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
 		__free_page(page->page_ptr);
 		page->page_ptr = NULL;
 err_alloc_page_failed:
@@ -368,8 +356,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	struct binder_buffer *buffer;
 	size_t buffer_size;
 	struct rb_node *best_fit = NULL;
-	void *has_page_addr;
-	void *end_page_addr;
+	void __user *has_page_addr;
+	void __user *end_page_addr;
 	size_t size, data_offsets_size;
 	int ret;
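The hunks above retire the allocator's kernel-side view of the buffer area: `buffer->data` (a vmalloc address) becomes `buffer->user_data` (the address in the target process), and buffer sizes fall out of pointer arithmetic over the address-ordered buffer list. A minimal user-space model of that size rule, with hypothetical `demo_*` names standing in for the driver's structures:

```c
#include <assert.h>
#include <stddef.h>

struct demo_buffer {
	unsigned char *user_data;	/* start of this buffer's payload */
	struct demo_buffer *next;	/* next buffer in address order, or NULL */
};

/* Mirrors the binder_alloc_buffer_size() logic: a buffer's size is the
 * distance to the next buffer's start, or to the end of the mapped region. */
static size_t demo_buffer_size(unsigned char *area, size_t area_size,
			       struct demo_buffer *buf)
{
	if (!buf->next)
		return area + area_size - buf->user_data;
	return buf->next->user_data - buf->user_data;
}

int main(void)
{
	unsigned char area[4096];
	struct demo_buffer b2 = { area + 512, NULL };
	struct demo_buffer b1 = { area, &b2 };

	assert(demo_buffer_size(area, sizeof(area), &b1) == 512);
	assert(demo_buffer_size(area, sizeof(area), &b2) == 4096 - 512);
	return 0;
}
```

The last buffer in the region runs to the end of the mapping; every other buffer ends where its successor begins.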
@@ -467,15 +455,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		      "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
 		      alloc->pid, size, buffer, buffer_size);
 
-	has_page_addr =
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+	has_page_addr = (void __user *)
+		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
 	WARN_ON(n && buffer_size != size);
 	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
-	ret = binder_update_page_range(alloc, 1,
-	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
+	ret = binder_update_page_range(alloc, 1, (void __user *)
+		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
 	if (ret)
 		return ERR_PTR(ret);
@@ -488,7 +476,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 			__func__, alloc->pid);
 		goto err_alloc_buf_struct_failed;
 	}
-	new_buffer->data = (u8 *)buffer->data + size;
+	new_buffer->user_data = (u8 __user *)buffer->user_data + size;
 	list_add(&new_buffer->entry, &buffer->entry);
 	new_buffer->free = 1;
 	binder_insert_free_buffer(alloc, new_buffer);
@@ -514,8 +502,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	return buffer;
 
 err_alloc_buf_struct_failed:
-	binder_update_page_range(alloc, 0,
-				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+	binder_update_page_range(alloc, 0, (void __user *)
+				 PAGE_ALIGN((uintptr_t)buffer->user_data),
 				 end_page_addr);
 	return ERR_PTR(-ENOMEM);
 }
@@ -550,14 +538,15 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	return buffer;
 }
 
-static void *buffer_start_page(struct binder_buffer *buffer)
+static void __user *buffer_start_page(struct binder_buffer *buffer)
 {
-	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
+	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
 }
 
-static void *prev_buffer_end_page(struct binder_buffer *buffer)
+static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
 {
-	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
+	return (void __user *)
+		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
@@ -572,7 +561,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
 		to_free = false;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer %pK share page with %pK\n",
-				   alloc->pid, buffer->data, prev->data);
+				   alloc->pid, buffer->user_data,
+				   prev->user_data);
 	}
 
 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
@@ -582,23 +572,24 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 					   "%d: merge free, buffer %pK share page with %pK\n",
 					   alloc->pid,
-					   buffer->data,
-					   next->data);
+					   buffer->user_data,
+					   next->user_data);
 		}
 	}
 
-	if (PAGE_ALIGNED(buffer->data)) {
+	if (PAGE_ALIGNED(buffer->user_data)) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer start %pK is page aligned\n",
-				   alloc->pid, buffer->data);
+				   alloc->pid, buffer->user_data);
 		to_free = false;
 	}
 
 	if (to_free) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
-				   alloc->pid, buffer->data,
-				   prev->data, next ? next->data : NULL);
+				   alloc->pid, buffer->user_data,
+				   prev->user_data,
+				   next ? next->user_data : NULL);
 		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
 					 buffer_start_page(buffer) + PAGE_SIZE);
 	}
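The allocation-path hunks above only retype the page-range arithmetic as `__user`; the math itself is unchanged: fresh pages are needed from `PAGE_ALIGN(buffer->user_data)` up to `end_page_addr`, clamped to `has_page_addr` so the allocation never claims the page that the tail of the free buffer (and thus the next buffer) may share. A stand-alone sketch of that arithmetic, with `DEMO_*` macros and made-up addresses in place of the kernel's:

```c
#include <stdio.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))
#define DEMO_PAGE_ALIGN(a) (((a) + DEMO_PAGE_SIZE - 1) & DEMO_PAGE_MASK)

int main(void)
{
	unsigned long user_data = 0x10001100UL;	/* free buffer start (mid-page) */
	unsigned long buffer_size = 0x2200;	/* size of the free buffer */
	unsigned long size = 0x1200;		/* bytes actually requested */

	/* Page holding the free buffer's end; it may back the next buffer. */
	unsigned long has_page_addr = (user_data + buffer_size) & DEMO_PAGE_MASK;
	/* First page boundary past the requested bytes, then clamped. */
	unsigned long end_page_addr = DEMO_PAGE_ALIGN(user_data + size);

	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;

	/* Only this range needs new pages; the page containing user_data
	 * itself is not re-mapped here, since binder keeps it backed while
	 * any buffer still shares it. */
	printf("map [%#lx, %#lx)\n", DEMO_PAGE_ALIGN(user_data), end_page_addr);
	return 0;
}
```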
@@ -624,8 +615,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	BUG_ON(buffer->free);
 	BUG_ON(size > buffer_size);
 	BUG_ON(buffer->transaction != NULL);
-	BUG_ON(buffer->data < alloc->buffer);
-	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
+	BUG_ON(buffer->user_data < alloc->buffer);
+	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
 
 	if (buffer->async_transaction) {
 		alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -636,8 +627,9 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	}
 
 	binder_update_page_range(alloc, 0,
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
+		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
+		(void __user *)(((uintptr_t)
+			buffer->user_data + buffer_size) & PAGE_MASK));
 
 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
@@ -693,7 +685,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 			      struct vm_area_struct *vma)
 {
 	int ret;
-	struct vm_struct *area;
 	const char *failure_string;
 	struct binder_buffer *buffer;
@@ -704,28 +695,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		goto err_already_mapped;
 	}
 
-	area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
-	if (area == NULL) {
-		ret = -ENOMEM;
-		failure_string = "get_vm_area";
-		goto err_get_vm_area_failed;
-	}
-	alloc->buffer = area->addr;
-	alloc->user_buffer_offset =
-		vma->vm_start - (uintptr_t)alloc->buffer;
+	alloc->buffer = (void __user *)vma->vm_start;
 	mutex_unlock(&binder_alloc_mmap_lock);
 
-#ifdef CONFIG_CPU_CACHE_VIPT
-	if (cache_is_vipt_aliasing()) {
-		while (CACHE_COLOUR(
-				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
-			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
-				__func__, alloc->pid, vma->vm_start,
-				vma->vm_end, alloc->buffer);
-			vma->vm_start += PAGE_SIZE;
-		}
-	}
-#endif
 	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
 			       sizeof(alloc->pages[0]),
 			       GFP_KERNEL);
@@ -743,7 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		goto err_alloc_buf_struct_failed;
 	}
 
-	buffer->data = alloc->buffer;
+	buffer->user_data = alloc->buffer;
 	list_add(&buffer->entry, &alloc->buffers);
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
@@ -758,9 +730,7 @@ err_alloc_buf_struct_failed:
 	alloc->pages = NULL;
 err_alloc_pages_failed:
 	mutex_lock(&binder_alloc_mmap_lock);
-	vfree(alloc->buffer);
 	alloc->buffer = NULL;
-err_get_vm_area_failed:
 err_already_mapped:
 	mutex_unlock(&binder_alloc_mmap_lock);
 	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
@@ -806,7 +776,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		int i;
 
 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-			void *page_addr;
+			void __user *page_addr;
 			bool on_lru;
 
 			if (!alloc->pages[i].page_ptr)
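In `binder_alloc_mmap_handler()` above, the separate `get_vm_area()` mapping is gone: `alloc->buffer` is now literally `vma->vm_start`, so the `user_buffer_offset` that used to translate between the kernel and user views of the same pages disappears, along with the VIPT-aliasing workaround. A toy illustration of the bookkeeping this removes (all addresses are made up):

```c
#include <stdio.h>

int main(void)
{
	unsigned long vm_start = 0xb6f00000UL;	/* hypothetical user mapping */

	/* Before: buffers lived at a kernel vmalloc address, and every
	 * user-visible address was derived through user_buffer_offset. */
	unsigned long kernel_buffer = 0xc0200000UL;	/* get_vm_area() result */
	unsigned long user_buffer_offset = vm_start - kernel_buffer;

	printf("old: user addr = %#lx (kernel %#lx + offset)\n",
	       kernel_buffer + user_buffer_offset, kernel_buffer);

	/* After: alloc->buffer == vma->vm_start, one address space only;
	 * individual pages are kmap()ed just for the duration of a copy. */
	printf("new: user addr = %#lx\n", vm_start);
	return 0;
}
```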
"on lru" : "active"); - unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); __free_page(alloc->pages[i].page_ptr); page_count++; } kfree(alloc->pages); - vfree(alloc->buffer); } mutex_unlock(&alloc->mutex); if (alloc->vma_vm_mm) @@ -839,7 +807,7 @@ static void print_binder_buffer(struct seq_file *m, const char *prefix, struct binder_buffer *buffer) { seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n", - prefix, buffer->debug_id, buffer->data, + prefix, buffer->debug_id, buffer->user_data, buffer->data_size, buffer->offsets_size, buffer->extra_buffers_size, buffer->transaction ? "active" : "delivered"); @@ -964,7 +932,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, if (!mmget_not_zero(alloc->vma_vm_mm)) goto err_mmget; mm = alloc->vma_vm_mm; - if (!down_write_trylock(&mm->mmap_sem)) + if (!down_read_trylock(&mm->mmap_sem)) goto err_down_write_mmap_sem_failed; } @@ -974,19 +942,16 @@ enum lru_status binder_alloc_free_page(struct list_head *item, if (vma) { trace_binder_unmap_user_start(alloc, index); - zap_page_range(vma, - page_addr + alloc->user_buffer_offset, - PAGE_SIZE); + zap_page_range(vma, page_addr, PAGE_SIZE); trace_binder_unmap_user_end(alloc, index); - up_write(&mm->mmap_sem); + up_read(&mm->mmap_sem); mmput(mm); } trace_binder_unmap_kernel_start(alloc, index); - unmap_kernel_range(page_addr, PAGE_SIZE); __free_page(page->page_ptr); page->page_ptr = NULL; @@ -1053,3 +1018,173 @@ int binder_alloc_shrinker_init(void) } return ret; } + +/** + * check_buffer() - verify that buffer/offset is safe to access + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be accessed + * @offset: offset into @buffer data + * @bytes: bytes to access from offset + * + * Check that the @offset/@bytes are within the size of the given + * @buffer and that the buffer is currently active and not freeable. + * Offsets must also be multiples of sizeof(u32). The kernel is + * allowed to touch the buffer in two cases: + * + * 1) when the buffer is being created: + * (buffer->free == 0 && buffer->allow_user_free == 0) + * 2) when the buffer is being torn down: + * (buffer->free == 0 && buffer->transaction == NULL). + * + * Return: true if the buffer is safe to access + */ +static inline bool check_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t offset, size_t bytes) +{ + size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); + + return buffer_size >= bytes && + offset <= buffer_size - bytes && + IS_ALIGNED(offset, sizeof(u32)) && + !buffer->free && + (!buffer->allow_user_free || !buffer->transaction); +} + +/** + * binder_alloc_get_page() - get kernel pointer for given buffer offset + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be accessed + * @buffer_offset: offset into @buffer data + * @pgoffp: address to copy final page offset to + * + * Lookup the struct page corresponding to the address + * at @buffer_offset into @buffer->user_data. If @pgoffp is not + * NULL, the byte-offset into the page is written there. + * + * The caller is responsible to ensure that the offset points + * to a valid address within the @buffer and that @buffer is + * not freeable by the user. Since it can't be freed, we are + * guaranteed that the corresponding elements of @alloc->pages[] + * cannot change. 
@@ -1053,3 +1018,173 @@ int binder_alloc_shrinker_init(void)
 	}
 	return ret;
 }
+
+/**
+ * check_buffer() - verify that buffer/offset is safe to access
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @offset: offset into @buffer data
+ * @bytes: bytes to access from offset
+ *
+ * Check that the @offset/@bytes are within the size of the given
+ * @buffer and that the buffer is currently active and not freeable.
+ * Offsets must also be multiples of sizeof(u32). The kernel is
+ * allowed to touch the buffer in two cases:
+ *
+ * 1) when the buffer is being created:
+ *     (buffer->free == 0 && buffer->allow_user_free == 0)
+ * 2) when the buffer is being torn down:
+ *     (buffer->free == 0 && buffer->transaction == NULL).
+ *
+ * Return: true if the buffer is safe to access
+ */
+static inline bool check_buffer(struct binder_alloc *alloc,
+				struct binder_buffer *buffer,
+				binder_size_t offset, size_t bytes)
+{
+	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+	return buffer_size >= bytes &&
+		offset <= buffer_size - bytes &&
+		IS_ALIGNED(offset, sizeof(u32)) &&
+		!buffer->free &&
+		(!buffer->allow_user_free || !buffer->transaction);
+}
+
+/**
+ * binder_alloc_get_page() - get kernel pointer for given buffer offset
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @buffer_offset: offset into @buffer data
+ * @pgoffp: address to copy final page offset to
+ *
+ * Lookup the struct page corresponding to the address
+ * at @buffer_offset into @buffer->user_data. If @pgoffp is not
+ * NULL, the byte-offset into the page is written there.
+ *
+ * The caller is responsible to ensure that the offset points
+ * to a valid address within the @buffer and that @buffer is
+ * not freeable by the user. Since it can't be freed, we are
+ * guaranteed that the corresponding elements of @alloc->pages[]
+ * cannot change.
+ *
+ * Return: struct page
+ */
+static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
+					  struct binder_buffer *buffer,
+					  binder_size_t buffer_offset,
+					  pgoff_t *pgoffp)
+{
+	binder_size_t buffer_space_offset = buffer_offset +
+		(buffer->user_data - alloc->buffer);
+	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
+	size_t index = buffer_space_offset >> PAGE_SHIFT;
+	struct binder_lru_page *lru_page;
+
+	lru_page = &alloc->pages[index];
+	*pgoffp = pgoff;
+	return lru_page->page_ptr;
+}
+
+/**
+ * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @buffer_offset: offset into @buffer data
+ * @from: userspace pointer to source buffer
+ * @bytes: bytes to copy
+ *
+ * Copy bytes from source userspace to target buffer.
+ *
+ * Return: bytes remaining to be copied
+ */
+unsigned long
+binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
+				 struct binder_buffer *buffer,
+				 binder_size_t buffer_offset,
+				 const void __user *from,
+				 size_t bytes)
+{
+	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
+		return bytes;
+
+	while (bytes) {
+		unsigned long size;
+		unsigned long ret;
+		struct page *page;
+		pgoff_t pgoff;
+		void *kptr;
+
+		page = binder_alloc_get_page(alloc, buffer,
+					     buffer_offset, &pgoff);
+		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+		kptr = kmap(page) + pgoff;
+		ret = copy_from_user(kptr, from, size);
+		kunmap(page);
+		if (ret)
+			return bytes - size + ret;
+		bytes -= size;
+		from += size;
+		buffer_offset += size;
+	}
+	return 0;
+}
+
+static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
+					bool to_buffer,
+					struct binder_buffer *buffer,
+					binder_size_t buffer_offset,
+					void *ptr,
+					size_t bytes)
+{
+	/* All copies must be 32-bit aligned and 32-bit size */
+	BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));
+
+	while (bytes) {
+		unsigned long size;
+		struct page *page;
+		pgoff_t pgoff;
+		void *tmpptr;
+		void *base_ptr;
+
+		page = binder_alloc_get_page(alloc, buffer,
+					     buffer_offset, &pgoff);
+		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+		base_ptr = kmap_atomic(page);
+		tmpptr = base_ptr + pgoff;
+		if (to_buffer)
+			memcpy(tmpptr, ptr, size);
+		else
+			memcpy(ptr, tmpptr, size);
+		/*
+		 * kunmap_atomic() takes care of flushing the cache
+		 * if this device has VIVT cache arch
+		 */
+		kunmap_atomic(base_ptr);
+		bytes -= size;
+		pgoff = 0;
+		ptr = ptr + size;
+		buffer_offset += size;
+	}
+}
+
+void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+				 struct binder_buffer *buffer,
+				 binder_size_t buffer_offset,
+				 void *src,
+				 size_t bytes)
+{
+	binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
+				    src, bytes);
+}
+
+void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+				   void *dest,
+				   struct binder_buffer *buffer,
+				   binder_size_t buffer_offset,
+				   size_t bytes)
+{
+	binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
+				    dest, bytes);
+}
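The new helpers close out the series: the kernel no longer keeps a permanent mapping of the buffer area, so every access goes through `binder_alloc_get_page()` to find the backing page plus in-page offset, then `kmap()`/`kmap_atomic()` for the duration of one `memcpy`. A user-space model of that offset math, with illustrative numbers and `DEMO_*` constants in place of the kernel's:

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)

int main(void)
{
	/* buffer->user_data - alloc->buffer: where the buffer starts
	 * inside the mapped region (deliberately not page aligned). */
	uint64_t buffer_start = 5000;
	uint64_t buffer_offset = 3000;	/* offset the caller asks for */

	uint64_t space_offset = buffer_start + buffer_offset;	/* 8000 */
	size_t index = space_offset >> DEMO_PAGE_SHIFT;	/* alloc->pages[] slot */
	size_t pgoff = space_offset & (DEMO_PAGE_SIZE - 1);	/* offset in page */

	assert(index == 1);		/* 8000 lands in the second page */
	assert(pgoff == 8000 - 4096);	/* 3904 bytes into that page */
	return 0;
}
```

The intent is that callers in binder.c copy through `binder_alloc_copy_user_to_buffer()` and `binder_alloc_copy_to/from_buffer()` rather than dereferencing buffer memory directly; a copy that straddles a page boundary is split into per-page chunks by the `while (bytes)` loops above.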
