Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: android: fix missing a blank line after declarations

This patch fixes "Missing a blank line after declarations" warnings.

Signed-off-by: Seunghun Lee <waydi1@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Seunghun Lee and committed by Greg Kroah-Hartman.
10f62861 ab0fbdc2

+67
+1
drivers/staging/android/alarm-dev.c
··· 329 329 if (file->private_data) { 330 330 for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) { 331 331 uint32_t alarm_type_mask = 1U << i; 332 + 332 333 if (alarm_enabled & alarm_type_mask) { 333 334 alarm_dbg(INFO, 334 335 "%s: clear alarm, pending %d\n",
+34
drivers/staging/android/binder.c
··· 118 118 struct kernel_param *kp) 119 119 { 120 120 int ret; 121 + 121 122 ret = param_set_int(val, kp); 122 123 if (binder_stop_on_user_error < 2) 123 124 wake_up(&binder_user_error_wait); ··· 195 194 struct binder_transaction_log *log) 196 195 { 197 196 struct binder_transaction_log_entry *e; 197 + 198 198 e = &log->entry[log->next]; 199 199 memset(e, 0, sizeof(*e)); 200 200 log->next++; ··· 434 432 static void binder_set_nice(long nice) 435 433 { 436 434 long min_nice; 435 + 437 436 if (can_nice(current, nice)) { 438 437 set_user_nice(current, nice); 439 438 return; ··· 587 584 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { 588 585 int ret; 589 586 struct page **page_array_ptr; 587 + 590 588 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 591 589 592 590 BUG_ON(*page); ··· 730 726 binder_insert_allocated_buffer(proc, buffer); 731 727 if (buffer_size != size) { 732 728 struct binder_buffer *new_buffer = (void *)buffer->data + size; 729 + 733 730 list_add(&new_buffer->entry, &buffer->entry); 734 731 new_buffer->free = 1; 735 732 binder_insert_free_buffer(proc, new_buffer); ··· 843 838 if (!list_is_last(&buffer->entry, &proc->buffers)) { 844 839 struct binder_buffer *next = list_entry(buffer->entry.next, 845 840 struct binder_buffer, entry); 841 + 846 842 if (next->free) { 847 843 rb_erase(&next->rb_node, &proc->free_buffers); 848 844 binder_delete_free_buffer(proc, next); ··· 852 846 if (proc->buffers.next != &buffer->entry) { 853 847 struct binder_buffer *prev = list_entry(buffer->entry.prev, 854 848 struct binder_buffer, entry); 849 + 855 850 if (prev->free) { 856 851 binder_delete_free_buffer(proc, buffer); 857 852 rb_erase(&prev->rb_node, &proc->free_buffers); ··· 1114 1107 struct list_head *target_list) 1115 1108 { 1116 1109 int ret; 1110 + 1117 1111 if (strong) { 1118 1112 if (ref->strong == 0) { 1119 1113 ret = binder_inc_node(ref->node, 1, 1, target_list); ··· 1146 1138 ref->strong--; 1147 1139 if (ref->strong == 0) { 
1148 1140 int ret; 1141 + 1149 1142 ret = binder_dec_node(ref->node, strong, 1); 1150 1143 if (ret) 1151 1144 return ret; ··· 1186 1177 uint32_t error_code) 1187 1178 { 1188 1179 struct binder_thread *target_thread; 1180 + 1189 1181 BUG_ON(t->flags & TF_ONE_WAY); 1190 1182 while (1) { 1191 1183 target_thread = t->from; ··· 1257 1247 off_end = (void *)offp + buffer->offsets_size; 1258 1248 for (; offp < off_end; offp++) { 1259 1249 struct flat_binder_object *fp; 1250 + 1260 1251 if (*offp > buffer->data_size - sizeof(*fp) || 1261 1252 buffer->data_size < sizeof(*fp) || 1262 1253 !IS_ALIGNED(*offp, sizeof(u32))) { ··· 1270 1259 case BINDER_TYPE_BINDER: 1271 1260 case BINDER_TYPE_WEAK_BINDER: { 1272 1261 struct binder_node *node = binder_get_node(proc, fp->binder); 1262 + 1273 1263 if (node == NULL) { 1274 1264 pr_err("transaction release %d bad node %016llx\n", 1275 1265 debug_id, (u64)fp->binder); ··· 1284 1272 case BINDER_TYPE_HANDLE: 1285 1273 case BINDER_TYPE_WEAK_HANDLE: { 1286 1274 struct binder_ref *ref = binder_get_ref(proc, fp->handle); 1275 + 1287 1276 if (ref == NULL) { 1288 1277 pr_err("transaction release %d bad handle %d\n", 1289 1278 debug_id, fp->handle); ··· 1376 1363 } else { 1377 1364 if (tr->target.handle) { 1378 1365 struct binder_ref *ref; 1366 + 1379 1367 ref = binder_get_ref(proc, tr->target.handle); 1380 1368 if (ref == NULL) { 1381 1369 binder_user_error("%d:%d got transaction to invalid handle\n", ··· 1400 1386 } 1401 1387 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 1402 1388 struct binder_transaction *tmp; 1389 + 1403 1390 tmp = thread->transaction_stack; 1404 1391 if (tmp->to_thread != thread) { 1405 1392 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", ··· 1516 1501 off_end = (void *)offp + tr->offsets_size; 1517 1502 for (; offp < off_end; offp++) { 1518 1503 struct flat_binder_object *fp; 1504 + 1519 1505 if (*offp > t->buffer->data_size - sizeof(*fp) || 1520 
1506 t->buffer->data_size < sizeof(*fp) || 1521 1507 !IS_ALIGNED(*offp, sizeof(u32))) { ··· 1531 1515 case BINDER_TYPE_WEAK_BINDER: { 1532 1516 struct binder_ref *ref; 1533 1517 struct binder_node *node = binder_get_node(proc, fp->binder); 1518 + 1534 1519 if (node == NULL) { 1535 1520 node = binder_new_node(proc, fp->binder, fp->cookie); 1536 1521 if (node == NULL) { ··· 1570 1553 case BINDER_TYPE_HANDLE: 1571 1554 case BINDER_TYPE_WEAK_HANDLE: { 1572 1555 struct binder_ref *ref = binder_get_ref(proc, fp->handle); 1556 + 1573 1557 if (ref == NULL) { 1574 1558 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 1575 1559 proc->pid, ··· 1593 1575 (u64)ref->node->ptr); 1594 1576 } else { 1595 1577 struct binder_ref *new_ref; 1578 + 1596 1579 new_ref = binder_get_ref_for_node(target_proc, ref->node); 1597 1580 if (new_ref == NULL) { 1598 1581 return_error = BR_FAILED_REPLY; ··· 1713 1694 1714 1695 { 1715 1696 struct binder_transaction_log_entry *fe; 1697 + 1716 1698 fe = binder_transaction_log_add(&binder_transaction_log_failed); 1717 1699 *fe = *e; 1718 1700 } ··· 2044 2024 struct binder_work *w; 2045 2025 binder_uintptr_t cookie; 2046 2026 struct binder_ref_death *death = NULL; 2027 + 2047 2028 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2048 2029 return -EFAULT; 2049 2030 2050 2031 ptr += sizeof(void *); 2051 2032 list_for_each_entry(w, &proc->delivered_death, entry) { 2052 2033 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); 2034 + 2053 2035 if (tmp_death->cookie == cookie) { 2054 2036 death = tmp_death; 2055 2037 break; ··· 2238 2216 const char *cmd_name; 2239 2217 int strong = node->internal_strong_refs || node->local_strong_refs; 2240 2218 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; 2219 + 2241 2220 if (weak && !node->has_weak_ref) { 2242 2221 cmd = BR_INCREFS; 2243 2222 cmd_name = "BR_INCREFS"; ··· 2345 2322 BUG_ON(t->buffer == NULL); 2346 2323 if 
(t->buffer->target_node) { 2347 2324 struct binder_node *target_node = t->buffer->target_node; 2325 + 2348 2326 tr.target.ptr = target_node->ptr; 2349 2327 tr.cookie = target_node->cookie; 2350 2328 t->saved_priority = task_nice(current); ··· 2367 2343 2368 2344 if (t->from) { 2369 2345 struct task_struct *sender = t->from->proc->tsk; 2346 + 2370 2347 tr.sender_pid = task_tgid_nr_ns(sender, 2371 2348 task_active_pid_ns(current)); 2372 2349 } else { ··· 2438 2413 static void binder_release_work(struct list_head *list) 2439 2414 { 2440 2415 struct binder_work *w; 2416 + 2441 2417 while (!list_empty(list)) { 2442 2418 w = list_first_entry(list, struct binder_work, entry); 2443 2419 list_del_init(&w->entry); ··· 2619 2593 switch (cmd) { 2620 2594 case BINDER_WRITE_READ: { 2621 2595 struct binder_write_read bwr; 2596 + 2622 2597 if (size != sizeof(struct binder_write_read)) { 2623 2598 ret = -EINVAL; 2624 2599 goto err; ··· 2744 2717 static void binder_vma_open(struct vm_area_struct *vma) 2745 2718 { 2746 2719 struct binder_proc *proc = vma->vm_private_data; 2720 + 2747 2721 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2748 2722 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 2749 2723 proc->pid, vma->vm_start, vma->vm_end, ··· 2755 2727 static void binder_vma_close(struct vm_area_struct *vma) 2756 2728 { 2757 2729 struct binder_proc *proc = vma->vm_private_data; 2730 + 2758 2731 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2759 2732 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 2760 2733 proc->pid, vma->vm_start, vma->vm_end, ··· 2898 2869 2899 2870 if (binder_debugfs_dir_entry_proc) { 2900 2871 char strbuf[11]; 2872 + 2901 2873 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 2902 2874 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, 2903 2875 binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); ··· 2920 2890 { 2921 2891 struct rb_node *n; 2922 2892 int wake_count = 0; 2893 + 2923 2894 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 
2924 2895 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 2896 + 2925 2897 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; 2926 2898 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 2927 2899 wake_up_interruptible(&thread->wait); ··· 2940 2908 static int binder_release(struct inode *nodp, struct file *filp) 2941 2909 { 2942 2910 struct binder_proc *proc = filp->private_data; 2911 + 2943 2912 debugfs_remove(proc->debugfs_entry); 2944 2913 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 2945 2914 ··· 3102 3069 struct files_struct *files; 3103 3070 3104 3071 int defer; 3072 + 3105 3073 do { 3106 3074 binder_lock(__func__); 3107 3075 mutex_lock(&binder_deferred_lock);
+10
drivers/staging/android/ion/ion.c
··· 408 408 409 409 while (n) { 410 410 struct ion_handle *entry = rb_entry(n, struct ion_handle, node); 411 + 411 412 if (buffer < entry->buffer) 412 413 n = n->rb_left; 413 414 else if (buffer > entry->buffer) ··· 721 720 { 722 721 int serial = -1; 723 722 struct rb_node *node; 723 + 724 724 for (node = rb_first(root); node; node = rb_next(node)) { 725 725 struct ion_client *client = rb_entry(node, struct ion_client, 726 726 node); 727 + 727 728 if (strcmp(client->name, name)) 728 729 continue; 729 730 serial = max(serial, client->display_serial); ··· 1038 1035 static void ion_dma_buf_release(struct dma_buf *dmabuf) 1039 1036 { 1040 1037 struct ion_buffer *buffer = dmabuf->priv; 1038 + 1041 1039 ion_buffer_put(buffer); 1042 1040 } 1043 1041 1044 1042 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) 1045 1043 { 1046 1044 struct ion_buffer *buffer = dmabuf->priv; 1045 + 1047 1046 return buffer->vaddr + offset * PAGE_SIZE; 1048 1047 } 1049 1048 ··· 1297 1292 case ION_IOC_IMPORT: 1298 1293 { 1299 1294 struct ion_handle *handle; 1295 + 1300 1296 handle = ion_import_dma_buf(client, data.fd.fd); 1301 1297 if (IS_ERR(handle)) 1302 1298 ret = PTR_ERR(handle); ··· 1399 1393 struct ion_client *client = rb_entry(n, struct ion_client, 1400 1394 node); 1401 1395 size_t size = ion_debug_heap_total(client, heap->id); 1396 + 1402 1397 if (!size) 1403 1398 continue; 1404 1399 if (client->task) { ··· 1523 1516 1524 1517 if (!debug_file) { 1525 1518 char buf[256], *path; 1519 + 1526 1520 path = dentry_path(dev->heaps_debug_root, buf, 256); 1527 1521 pr_err("Failed to create heap debugfs at %s/%s\n", 1528 1522 path, heap->name); ··· 1539 1531 &debug_shrink_fops); 1540 1532 if (!debug_file) { 1541 1533 char buf[256], *path; 1534 + 1542 1535 path = dentry_path(dev->heaps_debug_root, buf, 256); 1543 1536 pr_err("Failed to create heap shrinker debugfs at %s/%s\n", 1544 1537 path, debug_name); ··· 1615 1606 1616 1607 if (data->heaps[i].base == 0) { 1617 1608 
phys_addr_t paddr; 1609 + 1618 1610 paddr = memblock_alloc_base(data->heaps[i].size, 1619 1611 data->heaps[i].align, 1620 1612 MEMBLOCK_ALLOC_ANYWHERE);
+2
drivers/staging/android/ion/ion_heap.c
··· 48 48 for_each_sg(table->sgl, sg, table->nents, i) { 49 49 int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE; 50 50 struct page *page = sg_page(sg); 51 + 51 52 BUG_ON(i >= npages); 52 53 for (j = 0; j < npages_this_entry; j++) 53 54 *(tmp++) = page++; ··· 106 105 static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) 107 106 { 108 107 void *addr = vm_map_ram(pages, num, -1, pgprot); 108 + 109 109 if (!addr) 110 110 return -ENOMEM; 111 111 memset(addr, 0, PAGE_SIZE * num);
+1
drivers/staging/android/ion/ion_priv.h
··· 178 178 spinlock_t free_lock; 179 179 wait_queue_head_t waitqueue; 180 180 struct task_struct *task; 181 + 181 182 int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *); 182 183 }; 183 184
+5
drivers/staging/android/ion/ion_system_heap.c
··· 34 34 static int order_to_index(unsigned int order) 35 35 { 36 36 int i; 37 + 37 38 for (i = 0; i < num_orders; i++) 38 39 if (order == orders[i]) 39 40 return i; ··· 93 92 94 93 if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) { 95 94 struct ion_page_pool *pool = heap->pools[order_to_index(order)]; 95 + 96 96 ion_page_pool_free(pool, page); 97 97 } else { 98 98 __free_pages(page, order); ··· 244 242 245 243 for (i = 0; i < num_orders; i++) { 246 244 struct ion_page_pool *pool = sys_heap->pools[i]; 245 + 247 246 nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan); 248 247 } 249 248 ··· 270 267 struct ion_system_heap, 271 268 heap); 272 269 int i; 270 + 273 271 for (i = 0; i < num_orders; i++) { 274 272 struct ion_page_pool *pool = sys_heap->pools[i]; 273 + 275 274 seq_printf(s, "%d order %u highmem pages in pool = %lu total\n", 276 275 pool->high_count, pool->order, 277 276 (1 << pool->order) * PAGE_SIZE * pool->high_count);
+3
drivers/staging/android/logger.c
··· 108 108 { 109 109 if (file->f_mode & FMODE_READ) { 110 110 struct logger_reader *reader = file->private_data; 111 + 111 112 return reader->log; 112 113 } else 113 114 return file->private_data; ··· 125 124 size_t off, struct logger_entry *scratch) 126 125 { 127 126 size_t len = min(sizeof(struct logger_entry), log->size - off); 127 + 128 128 if (len != sizeof(struct logger_entry)) { 129 129 memcpy(((void *) scratch), log->buffer + off, len); 130 130 memcpy(((void *) scratch) + len, log->buffer, ··· 644 642 static long logger_set_version(struct logger_reader *reader, void __user *arg) 645 643 { 646 644 int version; 645 + 647 646 if (copy_from_user(&version, arg, sizeof(int))) 648 647 return -EFAULT; 649 648
+2
drivers/staging/android/sw_sync.c
··· 97 97 char *str, int size) 98 98 { 99 99 struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; 100 + 100 101 snprintf(str, size, "%d", pt->value); 101 102 } 102 103 ··· 157 156 static int sw_sync_release(struct inode *inode, struct file *file) 158 157 { 159 158 struct sw_sync_timeline *obj = file->private_data; 159 + 160 160 sync_timeline_destroy(&obj->obj); 161 161 return 0; 162 162 }
+8
drivers/staging/android/sync.c
··· 384 384 385 385 list_for_each_safe(pos, n, &fence->pt_list_head) { 386 386 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); 387 + 387 388 sync_timeline_remove_pt(pt); 388 389 } 389 390 } ··· 395 394 396 395 list_for_each_safe(pos, n, &fence->pt_list_head) { 397 396 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); 397 + 398 398 sync_pt_free(pt); 399 399 } 400 400 } ··· 829 827 unsigned long arg) 830 828 { 831 829 struct sync_fence *fence = file->private_data; 830 + 832 831 switch (cmd) { 833 832 case SYNC_IOC_WAIT: 834 833 return sync_fence_ioctl_wait(fence, arg); ··· 859 856 static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) 860 857 { 861 858 int status = pt->status; 859 + 862 860 seq_printf(s, " %s%spt %s", 863 861 fence ? pt->parent->name : "", 864 862 fence ? "_" : "", 865 863 sync_status_str(status)); 866 864 if (pt->status) { 867 865 struct timeval tv = ktime_to_timeval(pt->timestamp); 866 + 868 867 seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); 869 868 } 870 869 871 870 if (pt->parent->ops->timeline_value_str && 872 871 pt->parent->ops->pt_value_str) { 873 872 char value[64]; 873 + 874 874 pt->parent->ops->pt_value_str(pt, value, sizeof(value)); 875 875 seq_printf(s, ": %s", value); 876 876 if (fence) { ··· 898 892 899 893 if (obj->ops->timeline_value_str) { 900 894 char value[64]; 895 + 901 896 obj->ops->timeline_value_str(obj, value, sizeof(value)); 902 897 seq_printf(s, ": %s", value); 903 898 } else if (obj->ops->print_obj) { ··· 1008 1001 for (i = 0; i < s.count; i += DUMP_CHUNK) { 1009 1002 if ((s.count - i) > DUMP_CHUNK) { 1010 1003 char c = s.buf[i + DUMP_CHUNK]; 1004 + 1011 1005 s.buf[i + DUMP_CHUNK] = 0; 1012 1006 pr_cont("%s", s.buf + i); 1013 1007 s.buf[i + DUMP_CHUNK] = c;
+1
drivers/staging/android/timed_gpio.c
··· 51 51 if (hrtimer_active(&data->timer)) { 52 52 ktime_t r = hrtimer_get_remaining(&data->timer); 53 53 struct timeval t = ktime_to_timeval(r); 54 + 54 55 return t.tv_sec * 1000 + t.tv_usec / 1000; 55 56 } else 56 57 return 0;