Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] Remove eventpoll macro obfuscation

This patch gets rid of some macro obfuscation from fs/eventpoll.c by
removing slab allocator wrappers and converting macros to static inline
functions.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Davide Libenzi <davidel@xmailserver.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Pekka Enberg; committed by Linus Torvalds.
b030a4dd 46c271be

+110 -85
fs/eventpoll.c
··· 101 101 /* Maximum number of poll wake up nests we are allowing */ 102 102 #define EP_MAX_POLLWAKE_NESTS 4 103 103 104 - /* Macro to allocate a "struct epitem" from the slab cache */ 105 - #define EPI_MEM_ALLOC() (struct epitem *) kmem_cache_alloc(epi_cache, SLAB_KERNEL) 106 - 107 - /* Macro to free a "struct epitem" to the slab cache */ 108 - #define EPI_MEM_FREE(p) kmem_cache_free(epi_cache, p) 109 - 110 - /* Macro to allocate a "struct eppoll_entry" from the slab cache */ 111 - #define PWQ_MEM_ALLOC() (struct eppoll_entry *) kmem_cache_alloc(pwq_cache, SLAB_KERNEL) 112 - 113 - /* Macro to free a "struct eppoll_entry" to the slab cache */ 114 - #define PWQ_MEM_FREE(p) kmem_cache_free(pwq_cache, p) 115 - 116 - /* Fast test to see if the file is an evenpoll file */ 117 - #define IS_FILE_EPOLL(f) ((f)->f_op == &eventpoll_fops) 118 - 119 - /* Setup the structure that is used as key for the rb-tree */ 120 - #define EP_SET_FFD(p, f, d) do { (p)->file = (f); (p)->fd = (d); } while (0) 121 - 122 - /* Compare rb-tree keys */ 123 - #define EP_CMP_FFD(p1, p2) ((p1)->file > (p2)->file ? +1: \ 124 - ((p1)->file < (p2)->file ? -1: (p1)->fd - (p2)->fd)) 125 - 126 - /* Special initialization for the rb-tree node to detect linkage */ 127 - #define EP_RB_INITNODE(n) (n)->rb_parent = (n) 128 - 129 - /* Removes a node from the rb-tree and marks it for a fast is-linked check */ 130 - #define EP_RB_ERASE(n, r) do { rb_erase(n, r); (n)->rb_parent = (n); } while (0) 131 - 132 - /* Fast check to verify that the item is linked to the main rb-tree */ 133 - #define EP_RB_LINKED(n) ((n)->rb_parent != (n)) 134 - 135 - /* 136 - * Remove the item from the list and perform its initialization. 137 - * This is useful for us because we can test if the item is linked 138 - * using "EP_IS_LINKED(p)". 
139 - */ 140 - #define EP_LIST_DEL(p) do { list_del(p); INIT_LIST_HEAD(p); } while (0) 141 - 142 - /* Tells us if the item is currently linked */ 143 - #define EP_IS_LINKED(p) (!list_empty(p)) 144 - 145 - /* Get the "struct epitem" from a wait queue pointer */ 146 - #define EP_ITEM_FROM_WAIT(p) ((struct epitem *) container_of(p, struct eppoll_entry, wait)->base) 147 - 148 - /* Get the "struct epitem" from an epoll queue wrapper */ 149 - #define EP_ITEM_FROM_EPQUEUE(p) (container_of(p, struct ep_pqueue, pt)->epi) 150 - 151 - /* Tells if the epoll_ctl(2) operation needs an event copy from userspace */ 152 - #define EP_OP_HASH_EVENT(op) ((op) != EPOLL_CTL_DEL) 153 - 154 - 155 104 struct epoll_filefd { 156 105 struct file *file; 157 106 int fd; ··· 306 357 307 358 308 359 360 + /* Fast test to see if the file is an evenpoll file */ 361 + static inline int is_file_epoll(struct file *f) 362 + { 363 + return f->f_op == &eventpoll_fops; 364 + } 365 + 366 + /* Setup the structure that is used as key for the rb-tree */ 367 + static inline void ep_set_ffd(struct epoll_filefd *ffd, 368 + struct file *file, int fd) 369 + { 370 + ffd->file = file; 371 + ffd->fd = fd; 372 + } 373 + 374 + /* Compare rb-tree keys */ 375 + static inline int ep_cmp_ffd(struct epoll_filefd *p1, 376 + struct epoll_filefd *p2) 377 + { 378 + return (p1->file > p2->file ? +1: 379 + (p1->file < p2->file ? 
-1 : p1->fd - p2->fd)); 380 + } 381 + 382 + /* Special initialization for the rb-tree node to detect linkage */ 383 + static inline void ep_rb_initnode(struct rb_node *n) 384 + { 385 + n->rb_parent = n; 386 + } 387 + 388 + /* Removes a node from the rb-tree and marks it for a fast is-linked check */ 389 + static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r) 390 + { 391 + rb_erase(n, r); 392 + n->rb_parent = n; 393 + } 394 + 395 + /* Fast check to verify that the item is linked to the main rb-tree */ 396 + static inline int ep_rb_linked(struct rb_node *n) 397 + { 398 + return n->rb_parent != n; 399 + } 400 + 401 + /* 402 + * Remove the item from the list and perform its initialization. 403 + * This is useful for us because we can test if the item is linked 404 + * using "ep_is_linked(p)". 405 + */ 406 + static inline void ep_list_del(struct list_head *p) 407 + { 408 + list_del(p); 409 + INIT_LIST_HEAD(p); 410 + } 411 + 412 + /* Tells us if the item is currently linked */ 413 + static inline int ep_is_linked(struct list_head *p) 414 + { 415 + return !list_empty(p); 416 + } 417 + 418 + /* Get the "struct epitem" from a wait queue pointer */ 419 + static inline struct epitem * ep_item_from_wait(wait_queue_t *p) 420 + { 421 + return container_of(p, struct eppoll_entry, wait)->base; 422 + } 423 + 424 + /* Get the "struct epitem" from an epoll queue wrapper */ 425 + static inline struct epitem * ep_item_from_epqueue(poll_table *p) 426 + { 427 + return container_of(p, struct ep_pqueue, pt)->epi; 428 + } 429 + 430 + /* Tells if the epoll_ctl(2) operation needs an event copy from userspace */ 431 + static inline int ep_op_hash_event(int op) 432 + { 433 + return op != EPOLL_CTL_DEL; 434 + } 435 + 309 436 /* Initialize the poll safe wake up structure */ 310 437 static void ep_poll_safewake_init(struct poll_safewake *psw) 311 438 { ··· 481 456 epi = list_entry(lsthead->next, struct epitem, fllink); 482 457 483 458 ep = epi->ep; 484 - EP_LIST_DEL(&epi->fllink); 
459 + ep_list_del(&epi->fllink); 485 460 down_write(&ep->sem); 486 461 ep_remove(ep, epi); 487 462 up_write(&ep->sem); ··· 559 534 current, epfd, op, fd, event)); 560 535 561 536 error = -EFAULT; 562 - if (EP_OP_HASH_EVENT(op) && 537 + if (ep_op_hash_event(op) && 563 538 copy_from_user(&epds, event, sizeof(struct epoll_event))) 564 539 goto eexit_1; 565 540 ··· 585 560 * adding an epoll file descriptor inside itself. 586 561 */ 587 562 error = -EINVAL; 588 - if (file == tfile || !IS_FILE_EPOLL(file)) 563 + if (file == tfile || !is_file_epoll(file)) 589 564 goto eexit_3; 590 565 591 566 /* ··· 681 656 * the user passed to us _is_ an eventpoll file. 682 657 */ 683 658 error = -EINVAL; 684 - if (!IS_FILE_EPOLL(file)) 659 + if (!is_file_epoll(file)) 685 660 goto eexit_2; 686 661 687 662 /* ··· 856 831 struct epitem *epi, *epir = NULL; 857 832 struct epoll_filefd ffd; 858 833 859 - EP_SET_FFD(&ffd, file, fd); 834 + ep_set_ffd(&ffd, file, fd); 860 835 read_lock_irqsave(&ep->lock, flags); 861 836 for (rbp = ep->rbr.rb_node; rbp; ) { 862 837 epi = rb_entry(rbp, struct epitem, rbn); 863 - kcmp = EP_CMP_FFD(&ffd, &epi->ffd); 838 + kcmp = ep_cmp_ffd(&ffd, &epi->ffd); 864 839 if (kcmp > 0) 865 840 rbp = rbp->rb_right; 866 841 else if (kcmp < 0) ··· 900 875 { 901 876 902 877 if (atomic_dec_and_test(&epi->usecnt)) 903 - EPI_MEM_FREE(epi); 878 + kmem_cache_free(epi_cache, epi); 904 879 } 905 880 906 881 ··· 911 886 static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, 912 887 poll_table *pt) 913 888 { 914 - struct epitem *epi = EP_ITEM_FROM_EPQUEUE(pt); 889 + struct epitem *epi = ep_item_from_epqueue(pt); 915 890 struct eppoll_entry *pwq; 916 891 917 - if (epi->nwait >= 0 && (pwq = PWQ_MEM_ALLOC())) { 892 + if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, SLAB_KERNEL))) { 918 893 init_waitqueue_func_entry(&pwq->wait, ep_poll_callback); 919 894 pwq->whead = whead; 920 895 pwq->base = epi; ··· 937 912 while (*p) { 938 913 parent = *p; 939 914 epic = 
rb_entry(parent, struct epitem, rbn); 940 - kcmp = EP_CMP_FFD(&epi->ffd, &epic->ffd); 915 + kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd); 941 916 if (kcmp > 0) 942 917 p = &parent->rb_right; 943 918 else ··· 957 932 struct ep_pqueue epq; 958 933 959 934 error = -ENOMEM; 960 - if (!(epi = EPI_MEM_ALLOC())) 935 + if (!(epi = kmem_cache_alloc(epi_cache, SLAB_KERNEL))) 961 936 goto eexit_1; 962 937 963 938 /* Item initialization follow here ... */ 964 - EP_RB_INITNODE(&epi->rbn); 939 + ep_rb_initnode(&epi->rbn); 965 940 INIT_LIST_HEAD(&epi->rdllink); 966 941 INIT_LIST_HEAD(&epi->fllink); 967 942 INIT_LIST_HEAD(&epi->txlink); 968 943 INIT_LIST_HEAD(&epi->pwqlist); 969 944 epi->ep = ep; 970 - EP_SET_FFD(&epi->ffd, tfile, fd); 945 + ep_set_ffd(&epi->ffd, tfile, fd); 971 946 epi->event = *event; 972 947 atomic_set(&epi->usecnt, 1); 973 948 epi->nwait = 0; ··· 1003 978 ep_rbtree_insert(ep, epi); 1004 979 1005 980 /* If the file is already "ready" we drop it inside the ready list */ 1006 - if ((revents & event->events) && !EP_IS_LINKED(&epi->rdllink)) { 981 + if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) { 1007 982 list_add_tail(&epi->rdllink, &ep->rdllist); 1008 983 1009 984 /* Notify waiting tasks that events are available */ ··· 1032 1007 * allocated wait queue. 1033 1008 */ 1034 1009 write_lock_irqsave(&ep->lock, flags); 1035 - if (EP_IS_LINKED(&epi->rdllink)) 1036 - EP_LIST_DEL(&epi->rdllink); 1010 + if (ep_is_linked(&epi->rdllink)) 1011 + ep_list_del(&epi->rdllink); 1037 1012 write_unlock_irqrestore(&ep->lock, flags); 1038 1013 1039 - EPI_MEM_FREE(epi); 1014 + kmem_cache_free(epi_cache, epi); 1040 1015 eexit_1: 1041 1016 return error; 1042 1017 } ··· 1075 1050 * If the item is not linked to the hash it means that it's on its 1076 1051 * way toward the removal. Do nothing in this case. 
1077 1052 */ 1078 - if (EP_RB_LINKED(&epi->rbn)) { 1053 + if (ep_rb_linked(&epi->rbn)) { 1079 1054 /* 1080 1055 * If the item is "hot" and it is not registered inside the ready 1081 1056 * list, push it inside. If the item is not "hot" and it is currently 1082 1057 * registered inside the ready list, unlink it. 1083 1058 */ 1084 1059 if (revents & event->events) { 1085 - if (!EP_IS_LINKED(&epi->rdllink)) { 1060 + if (!ep_is_linked(&epi->rdllink)) { 1086 1061 list_add_tail(&epi->rdllink, &ep->rdllist); 1087 1062 1088 1063 /* Notify waiting tasks that events are available */ ··· 1122 1097 while (!list_empty(lsthead)) { 1123 1098 pwq = list_entry(lsthead->next, struct eppoll_entry, llink); 1124 1099 1125 - EP_LIST_DEL(&pwq->llink); 1100 + ep_list_del(&pwq->llink); 1126 1101 remove_wait_queue(pwq->whead, &pwq->wait); 1127 - PWQ_MEM_FREE(pwq); 1102 + kmem_cache_free(pwq_cache, pwq); 1128 1103 } 1129 1104 } 1130 1105 } ··· 1143 1118 * The check protect us from doing a double unlink ( crash ). 1144 1119 */ 1145 1120 error = -ENOENT; 1146 - if (!EP_RB_LINKED(&epi->rbn)) 1121 + if (!ep_rb_linked(&epi->rbn)) 1147 1122 goto eexit_1; 1148 1123 1149 1124 /* ··· 1158 1133 * This operation togheter with the above check closes the door to 1159 1134 * double unlinks. 1160 1135 */ 1161 - EP_RB_ERASE(&epi->rbn, &ep->rbr); 1136 + ep_rb_erase(&epi->rbn, &ep->rbr); 1162 1137 1163 1138 /* 1164 1139 * If the item we are going to remove is inside the ready file descriptors 1165 1140 * we want to remove it from this list to avoid stale events. 
1166 1141 */ 1167 - if (EP_IS_LINKED(&epi->rdllink)) 1168 - EP_LIST_DEL(&epi->rdllink); 1142 + if (ep_is_linked(&epi->rdllink)) 1143 + ep_list_del(&epi->rdllink); 1169 1144 1170 1145 error = 0; 1171 1146 eexit_1: ··· 1199 1174 1200 1175 /* Remove the current item from the list of epoll hooks */ 1201 1176 spin_lock(&file->f_ep_lock); 1202 - if (EP_IS_LINKED(&epi->fllink)) 1203 - EP_LIST_DEL(&epi->fllink); 1177 + if (ep_is_linked(&epi->fllink)) 1178 + ep_list_del(&epi->fllink); 1204 1179 spin_unlock(&file->f_ep_lock); 1205 1180 1206 1181 /* We need to acquire the write IRQ lock before calling ep_unlink() */ ··· 1235 1210 { 1236 1211 int pwake = 0; 1237 1212 unsigned long flags; 1238 - struct epitem *epi = EP_ITEM_FROM_WAIT(wait); 1213 + struct epitem *epi = ep_item_from_wait(wait); 1239 1214 struct eventpoll *ep = epi->ep; 1240 1215 1241 1216 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", ··· 1253 1228 goto is_disabled; 1254 1229 1255 1230 /* If this file is already in the ready list we exit soon */ 1256 - if (EP_IS_LINKED(&epi->rdllink)) 1231 + if (ep_is_linked(&epi->rdllink)) 1257 1232 goto is_linked; 1258 1233 1259 1234 list_add_tail(&epi->rdllink, &ep->rdllist); ··· 1332 1307 lnk = lnk->next; 1333 1308 1334 1309 /* If this file is already in the ready list we exit soon */ 1335 - if (!EP_IS_LINKED(&epi->txlink)) { 1310 + if (!ep_is_linked(&epi->txlink)) { 1336 1311 /* 1337 1312 * This is initialized in this way so that the default 1338 1313 * behaviour of the reinjecting code will be to push back ··· 1347 1322 /* 1348 1323 * Unlink the item from the ready list. 
1349 1324 */ 1350 - EP_LIST_DEL(&epi->rdllink); 1325 + ep_list_del(&epi->rdllink); 1351 1326 } 1352 1327 } 1353 1328 ··· 1426 1401 epi = list_entry(txlist->next, struct epitem, txlink); 1427 1402 1428 1403 /* Unlink the current item from the transfer list */ 1429 - EP_LIST_DEL(&epi->txlink); 1404 + ep_list_del(&epi->txlink); 1430 1405 1431 1406 /* 1432 1407 * If the item is no more linked to the interest set, we don't ··· 1435 1410 * item is set to have an Edge Triggered behaviour, we don't have 1436 1411 * to push it back either. 1437 1412 */ 1438 - if (EP_RB_LINKED(&epi->rbn) && !(epi->event.events & EPOLLET) && 1439 - (epi->revents & epi->event.events) && !EP_IS_LINKED(&epi->rdllink)) { 1413 + if (ep_rb_linked(&epi->rbn) && !(epi->event.events & EPOLLET) && 1414 + (epi->revents & epi->event.events) && !ep_is_linked(&epi->rdllink)) { 1440 1415 list_add_tail(&epi->rdllink, &ep->rdllist); 1441 1416 ricnt++; 1442 1417 }