Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dlm: improve rsb searches

By pre-allocating rsb structs before searching the hash
table, they can be inserted immediately. This avoids
always having to repeat the search when adding the struct
to the hash list.

This also adds space to the rsb struct for a max resource
name, so an rsb allocation can be used by any request.
The constant size also allows us to finally use a slab
for the rsb structs.

Signed-off-by: David Teigland <teigland@redhat.com>

+121 -48
+8 -1
fs/dlm/config.c
··· 102 102 unsigned int cl_protocol; 103 103 unsigned int cl_timewarn_cs; 104 104 unsigned int cl_waitwarn_us; 105 + unsigned int cl_new_rsb_count; 105 106 }; 106 107 107 108 enum { ··· 117 116 CLUSTER_ATTR_PROTOCOL, 118 117 CLUSTER_ATTR_TIMEWARN_CS, 119 118 CLUSTER_ATTR_WAITWARN_US, 119 + CLUSTER_ATTR_NEW_RSB_COUNT, 120 120 }; 121 121 122 122 struct cluster_attribute { ··· 170 168 CLUSTER_ATTR(protocol, 0); 171 169 CLUSTER_ATTR(timewarn_cs, 1); 172 170 CLUSTER_ATTR(waitwarn_us, 0); 171 + CLUSTER_ATTR(new_rsb_count, 0); 173 172 174 173 static struct configfs_attribute *cluster_attrs[] = { 175 174 [CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr, ··· 184 181 [CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol.attr, 185 182 [CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs.attr, 186 183 [CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us.attr, 184 + [CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count.attr, 187 185 NULL, 188 186 }; 189 187 ··· 454 450 cl->cl_protocol = dlm_config.ci_protocol; 455 451 cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs; 456 452 cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us; 453 + cl->cl_new_rsb_count = dlm_config.ci_new_rsb_count; 457 454 458 455 space_list = &sps->ss_group; 459 456 comm_list = &cms->cs_group; ··· 1046 1041 #define DEFAULT_PROTOCOL 0 1047 1042 #define DEFAULT_TIMEWARN_CS 500 /* 5 sec = 500 centiseconds */ 1048 1043 #define DEFAULT_WAITWARN_US 0 1044 + #define DEFAULT_NEW_RSB_COUNT 128 1049 1045 1050 1046 struct dlm_config_info dlm_config = { 1051 1047 .ci_tcp_port = DEFAULT_TCP_PORT, ··· 1059 1053 .ci_log_debug = DEFAULT_LOG_DEBUG, 1060 1054 .ci_protocol = DEFAULT_PROTOCOL, 1061 1055 .ci_timewarn_cs = DEFAULT_TIMEWARN_CS, 1062 - .ci_waitwarn_us = DEFAULT_WAITWARN_US 1056 + .ci_waitwarn_us = DEFAULT_WAITWARN_US, 1057 + .ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT 1063 1058 }; 1064 1059
+1
fs/dlm/config.h
··· 28 28 int ci_protocol; 29 29 int ci_timewarn_cs; 30 30 int ci_waitwarn_us; 31 + int ci_new_rsb_count; 31 32 }; 32 33 33 34 extern struct dlm_config_info dlm_config;
+5 -1
fs/dlm/dlm_internal.h
··· 293 293 int res_recover_locks_count; 294 294 295 295 char *res_lvbptr; 296 - char res_name[1]; 296 + char res_name[DLM_RESNAME_MAXLEN+1]; 297 297 }; 298 298 299 299 /* find_rsb() flags */ ··· 476 476 477 477 struct mutex ls_timeout_mutex; 478 478 struct list_head ls_timeout; 479 + 480 + spinlock_t ls_new_rsb_spin; 481 + int ls_new_rsb_count; 482 + struct list_head ls_new_rsb; /* new rsb structs */ 479 483 480 484 struct list_head ls_nodes; /* current nodes in ls */ 481 485 struct list_head ls_nodes_gone; /* dead node list, recovery */
+82 -37
fs/dlm/lock.c
··· 327 327 * Basic operations on rsb's and lkb's 328 328 */ 329 329 330 - static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len) 330 + static int pre_rsb_struct(struct dlm_ls *ls) 331 + { 332 + struct dlm_rsb *r1, *r2; 333 + int count = 0; 334 + 335 + spin_lock(&ls->ls_new_rsb_spin); 336 + if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) { 337 + spin_unlock(&ls->ls_new_rsb_spin); 338 + return 0; 339 + } 340 + spin_unlock(&ls->ls_new_rsb_spin); 341 + 342 + r1 = dlm_allocate_rsb(ls); 343 + r2 = dlm_allocate_rsb(ls); 344 + 345 + spin_lock(&ls->ls_new_rsb_spin); 346 + if (r1) { 347 + list_add(&r1->res_hashchain, &ls->ls_new_rsb); 348 + ls->ls_new_rsb_count++; 349 + } 350 + if (r2) { 351 + list_add(&r2->res_hashchain, &ls->ls_new_rsb); 352 + ls->ls_new_rsb_count++; 353 + } 354 + count = ls->ls_new_rsb_count; 355 + spin_unlock(&ls->ls_new_rsb_spin); 356 + 357 + if (!count) 358 + return -ENOMEM; 359 + return 0; 360 + } 361 + 362 + /* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can 363 + unlock any spinlocks, go back and call pre_rsb_struct again. 364 + Otherwise, take an rsb off the list and return it. 
*/ 365 + 366 + static int get_rsb_struct(struct dlm_ls *ls, char *name, int len, 367 + struct dlm_rsb **r_ret) 331 368 { 332 369 struct dlm_rsb *r; 370 + int count; 333 371 334 - r = dlm_allocate_rsb(ls, len); 335 - if (!r) 336 - return NULL; 372 + spin_lock(&ls->ls_new_rsb_spin); 373 + if (list_empty(&ls->ls_new_rsb)) { 374 + count = ls->ls_new_rsb_count; 375 + spin_unlock(&ls->ls_new_rsb_spin); 376 + log_debug(ls, "find_rsb retry %d %d %s", 377 + count, dlm_config.ci_new_rsb_count, name); 378 + return -EAGAIN; 379 + } 380 + 381 + r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain); 382 + list_del(&r->res_hashchain); 383 + ls->ls_new_rsb_count--; 384 + spin_unlock(&ls->ls_new_rsb_spin); 337 385 338 386 r->res_ls = ls; 339 387 r->res_length = len; 340 388 memcpy(r->res_name, name, len); 341 389 mutex_init(&r->res_mutex); 342 390 391 + INIT_LIST_HEAD(&r->res_hashchain); 343 392 INIT_LIST_HEAD(&r->res_lookup); 344 393 INIT_LIST_HEAD(&r->res_grantqueue); 345 394 INIT_LIST_HEAD(&r->res_convertqueue); ··· 396 347 INIT_LIST_HEAD(&r->res_root_list); 397 348 INIT_LIST_HEAD(&r->res_recover_list); 398 349 399 - return r; 350 + *r_ret = r; 351 + return 0; 400 352 } 401 353 402 354 static int search_rsb_list(struct list_head *head, char *name, int len, ··· 455 405 return error; 456 406 } 457 407 458 - static int search_rsb(struct dlm_ls *ls, char *name, int len, int b, 459 - unsigned int flags, struct dlm_rsb **r_ret) 460 - { 461 - int error; 462 - spin_lock(&ls->ls_rsbtbl[b].lock); 463 - error = _search_rsb(ls, name, len, b, flags, r_ret); 464 - spin_unlock(&ls->ls_rsbtbl[b].lock); 465 - return error; 466 - } 467 - 468 408 /* 469 409 * Find rsb in rsbtbl and potentially create/add one 470 410 * ··· 472 432 static int find_rsb(struct dlm_ls *ls, char *name, int namelen, 473 433 unsigned int flags, struct dlm_rsb **r_ret) 474 434 { 475 - struct dlm_rsb *r = NULL, *tmp; 435 + struct dlm_rsb *r = NULL; 476 436 uint32_t hash, bucket; 477 - int error = -EINVAL; 437 
+ int error; 478 438 479 - if (namelen > DLM_RESNAME_MAXLEN) 439 + if (namelen > DLM_RESNAME_MAXLEN) { 440 + error = -EINVAL; 480 441 goto out; 442 + } 481 443 482 444 if (dlm_no_directory(ls)) 483 445 flags |= R_CREATE; 484 446 485 - error = 0; 486 447 hash = jhash(name, namelen, 0); 487 448 bucket = hash & (ls->ls_rsbtbl_size - 1); 488 449 489 - error = search_rsb(ls, name, namelen, bucket, flags, &r); 450 + retry: 451 + if (flags & R_CREATE) { 452 + error = pre_rsb_struct(ls); 453 + if (error < 0) 454 + goto out; 455 + } 456 + 457 + spin_lock(&ls->ls_rsbtbl[bucket].lock); 458 + 459 + error = _search_rsb(ls, name, namelen, bucket, flags, &r); 490 460 if (!error) 491 - goto out; 461 + goto out_unlock; 492 462 493 463 if (error == -EBADR && !(flags & R_CREATE)) 494 - goto out; 464 + goto out_unlock; 495 465 496 466 /* the rsb was found but wasn't a master copy */ 497 467 if (error == -ENOTBLK) 498 - goto out; 468 + goto out_unlock; 499 469 500 - error = -ENOMEM; 501 - r = create_rsb(ls, name, namelen); 502 - if (!r) 503 - goto out; 470 + error = get_rsb_struct(ls, name, namelen, &r); 471 + if (error == -EAGAIN) { 472 + spin_unlock(&ls->ls_rsbtbl[bucket].lock); 473 + goto retry; 474 + } 475 + if (error) 476 + goto out_unlock; 504 477 505 478 r->res_hash = hash; 506 479 r->res_bucket = bucket; ··· 527 474 nodeid = 0; 528 475 r->res_nodeid = nodeid; 529 476 } 530 - 531 - spin_lock(&ls->ls_rsbtbl[bucket].lock); 532 - error = _search_rsb(ls, name, namelen, bucket, 0, &tmp); 533 - if (!error) { 534 - spin_unlock(&ls->ls_rsbtbl[bucket].lock); 535 - dlm_free_rsb(r); 536 - r = tmp; 537 - goto out; 538 - } 539 477 list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list); 540 - spin_unlock(&ls->ls_rsbtbl[bucket].lock); 541 478 error = 0; 479 + out_unlock: 480 + spin_unlock(&ls->ls_rsbtbl[bucket].lock); 542 481 out: 543 482 *r_ret = r; 544 483 return error;
+10
fs/dlm/lockspace.c
··· 493 493 INIT_LIST_HEAD(&ls->ls_timeout); 494 494 mutex_init(&ls->ls_timeout_mutex); 495 495 496 + INIT_LIST_HEAD(&ls->ls_new_rsb); 497 + spin_lock_init(&ls->ls_new_rsb_spin); 498 + 496 499 INIT_LIST_HEAD(&ls->ls_nodes); 497 500 INIT_LIST_HEAD(&ls->ls_nodes_gone); 498 501 ls->ls_num_nodes = 0; ··· 766 763 } 767 764 768 765 vfree(ls->ls_rsbtbl); 766 + 767 + while (!list_empty(&ls->ls_new_rsb)) { 768 + rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, 769 + res_hashchain); 770 + list_del(&rsb->res_hashchain); 771 + dlm_free_rsb(rsb); 772 + } 769 773 770 774 /* 771 775 * Free structures on any other lists
+14 -8
fs/dlm/memory.c
··· 16 16 #include "memory.h" 17 17 18 18 static struct kmem_cache *lkb_cache; 19 + static struct kmem_cache *rsb_cache; 19 20 20 21 21 22 int __init dlm_memory_init(void) ··· 27 26 __alignof__(struct dlm_lkb), 0, NULL); 28 27 if (!lkb_cache) 29 28 ret = -ENOMEM; 29 + 30 + rsb_cache = kmem_cache_create("dlm_rsb", sizeof(struct dlm_rsb), 31 + __alignof__(struct dlm_rsb), 0, NULL); 32 + if (!rsb_cache) { 33 + kmem_cache_destroy(lkb_cache); 34 + ret = -ENOMEM; 35 + } 36 + 30 37 return ret; 31 38 } 32 39 ··· 42 33 { 43 34 if (lkb_cache) 44 35 kmem_cache_destroy(lkb_cache); 36 + if (rsb_cache) 37 + kmem_cache_destroy(rsb_cache); 45 38 } 46 39 47 40 char *dlm_allocate_lvb(struct dlm_ls *ls) ··· 59 48 kfree(p); 60 49 } 61 50 62 - /* FIXME: have some minimal space built-in to rsb for the name and 63 - kmalloc a separate name if needed, like dentries are done */ 64 - 65 - struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls, int namelen) 51 + struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls) 66 52 { 67 53 struct dlm_rsb *r; 68 54 69 - DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,); 70 - 71 - r = kzalloc(sizeof(*r) + namelen, GFP_NOFS); 55 + r = kmem_cache_zalloc(rsb_cache, GFP_NOFS); 72 56 return r; 73 57 } 74 58 ··· 71 65 { 72 66 if (r->res_lvbptr) 73 67 dlm_free_lvb(r->res_lvbptr); 74 - kfree(r); 68 + kmem_cache_free(rsb_cache, r); 75 69 } 76 70 77 71 struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls)
+1 -1
fs/dlm/memory.h
··· 16 16 17 17 int dlm_memory_init(void); 18 18 void dlm_memory_exit(void); 19 - struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls, int namelen); 19 + struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls); 20 20 void dlm_free_rsb(struct dlm_rsb *r); 21 21 struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls); 22 22 void dlm_free_lkb(struct dlm_lkb *l);