/*
 * Device operations for the pnfs nfs4 file layout driver.
 *
 * Copyright (c) 2002
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 * Garth Goodson <Garth.Goodson@netapp.com>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include "nfs4filelayout.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

/*
 * Device ID RCU cache. A device ID is unique per client ID and layout type.
 */
#define NFS4_FL_DEVICE_ID_HASH_BITS	5
#define NFS4_FL_DEVICE_ID_HASH_SIZE	(1 << NFS4_FL_DEVICE_ID_HASH_BITS)
#define NFS4_FL_DEVICE_ID_HASH_MASK	(NFS4_FL_DEVICE_ID_HASH_SIZE - 1)
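/*
 * Hash a device ID into the cache table: a simple multiply-by-37 rolling
 * hash over the raw NFS4_DEVICEID4_SIZE bytes, masked to the table size.
 */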
static inline u32
nfs4_fl_deviceid_hash(struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_FL_DEVICE_ID_HASH_MASK;
}

static struct hlist_head filelayout_deviceid_cache[NFS4_FL_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(filelayout_deviceid_lock);

/*
 * Data server cache
 *
 * Data servers can be mapped to different device ids.
 * nfs4_pnfs_ds reference counting
 *   - set to 1 on allocation
 *   - incremented when a device id maps a data server already in the cache.
 *   - decremented when deviceid is removed from the cache.
 */
DEFINE_SPINLOCK(nfs4_ds_cache_lock);
static LIST_HEAD(nfs4_data_server_cache);

/* Debug routines */
void
print_ds(struct nfs4_pnfs_ds *ds)
{
	if (ds == NULL) {
		printk("%s NULL device\n", __func__);
		return;
	}
	printk("        ip_addr %x port %hu\n"
		"        ref count %d\n"
		"        client %p\n"
		"        cl_exchange_flags %x\n",
		ntohl(ds->ds_ip_addr), ntohs(ds->ds_port),
		atomic_read(&ds->ds_count), ds->ds_clp,
		ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
}

void
print_ds_list(struct nfs4_file_layout_dsaddr *dsaddr)
{
	int i;

	ifdebug(FACILITY) {
		printk("%s dsaddr->ds_num %d\n", __func__,
		       dsaddr->ds_num);
		for (i = 0; i < dsaddr->ds_num; i++)
			print_ds(dsaddr->ds_list[i]);
	}
}

void print_deviceid(struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}

/* nfs4_ds_cache_lock is held */
static struct nfs4_pnfs_ds *
_data_server_lookup_locked(u32 ip_addr, u32 port)
{
	struct nfs4_pnfs_ds *ds;

	dprintk("_data_server_lookup: ip_addr=%x port=%hu\n",
		ntohl(ip_addr), ntohs(port));

	list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) {
		if (ds->ds_ip_addr == ip_addr &&
		    ds->ds_port == port) {
			return ds;
		}
	}
	return NULL;
}

/*
 * Create an rpc connection to the nfs4_pnfs_ds data server.
 * Currently only supports IPv4.
 */
static int
nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
{
	struct nfs_client *clp;
	struct sockaddr_in sin;
	int status = 0;

	dprintk("--> %s ip:port %x:%hu au_flavor %d\n", __func__,
		ntohl(ds->ds_ip_addr), ntohs(ds->ds_port),
		mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor);

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = ds->ds_ip_addr;
	sin.sin_port = ds->ds_port;

	clp = nfs4_set_ds_client(mds_srv->nfs_client, (struct sockaddr *)&sin,
				 sizeof(sin), IPPROTO_TCP);
	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	if ((clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) != 0) {
		if (!is_ds_client(clp)) {
			status = -ENODEV;
			goto out_put;
		}
		ds->ds_clp = clp;
		dprintk("%s [existing] ip=%x, port=%hu\n", __func__,
			ntohl(ds->ds_ip_addr), ntohs(ds->ds_port));
		goto out;
	}

	/*
	 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the DS lease to
	 * be equal to the MDS lease. Renewal is scheduled in create_session.
	 */
	spin_lock(&mds_srv->nfs_client->cl_lock);
	clp->cl_lease_time = mds_srv->nfs_client->cl_lease_time;
	spin_unlock(&mds_srv->nfs_client->cl_lock);
	clp->cl_last_renewal = jiffies;

	/* New nfs_client */
	status = nfs4_init_ds_session(clp);
	if (status)
		goto out_put;

	ds->ds_clp = clp;
	dprintk("%s [new] ip=%x, port=%hu\n", __func__, ntohl(ds->ds_ip_addr),
		ntohs(ds->ds_port));
out:
	return status;
out_put:
	nfs_put_client(clp);
	goto out;
}

static void
destroy_ds(struct nfs4_pnfs_ds *ds)
{
	dprintk("--> %s\n", __func__);
	ifdebug(FACILITY)
		print_ds(ds);

	if (ds->ds_clp)
		nfs_put_client(ds->ds_clp);
	kfree(ds);
}
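/*
 * Release a file layout device: drop each data server's reference, removing
 * it from the global cache and destroying it when the count reaches zero,
 * then free the stripe index array and the dsaddr itself.
 */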
static void
nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
{
	struct nfs4_pnfs_ds *ds;
	int i;

	print_deviceid(&dsaddr->deviceid);

	for (i = 0; i < dsaddr->ds_num; i++) {
		ds = dsaddr->ds_list[i];
		if (ds != NULL) {
			if (atomic_dec_and_lock(&ds->ds_count,
						&nfs4_ds_cache_lock)) {
				list_del_init(&ds->ds_node);
				spin_unlock(&nfs4_ds_cache_lock);
				destroy_ds(ds);
			}
		}
	}
	kfree(dsaddr->stripe_indices);
	kfree(dsaddr);
}
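/*
 * Look up ip_addr:port in the data server cache. If the server is already
 * present, take an extra reference and return the cached entry; otherwise
 * insert a newly allocated one with a reference count of 1.
 */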
static struct nfs4_pnfs_ds *
nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds *tmp_ds, *ds;

	ds = kzalloc(sizeof(*tmp_ds), gfp_flags);
	if (!ds)
		goto out;

	spin_lock(&nfs4_ds_cache_lock);
	tmp_ds = _data_server_lookup_locked(ip_addr, port);
	if (tmp_ds == NULL) {
		ds->ds_ip_addr = ip_addr;
		ds->ds_port = port;
		atomic_set(&ds->ds_count, 1);
		INIT_LIST_HEAD(&ds->ds_node);
		ds->ds_clp = NULL;
		list_add(&ds->ds_node, &nfs4_data_server_cache);
		dprintk("%s add new data server ip 0x%x\n", __func__,
			ds->ds_ip_addr);
	} else {
		kfree(ds);
		atomic_inc(&tmp_ds->ds_count);
		dprintk("%s data server found ip 0x%x, inc'ed ds_count to %d\n",
			__func__, tmp_ds->ds_ip_addr,
			atomic_read(&tmp_ds->ds_count));
		ds = tmp_ds;
	}
	spin_unlock(&nfs4_ds_cache_lock);
out:
	return ds;
}

/*
 * Currently only supports ipv4, and one multi-path address.
 */
static struct nfs4_pnfs_ds *
decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds *ds = NULL;
	char *buf;
	const char *ipend, *pstr;
	u32 ip_addr, port;
	int nlen, rlen, i;
	int tmp[2];
	__be32 *p;

	/* r_netid */
	p = xdr_inline_decode(streamp, 4);
	if (unlikely(!p))
		goto out_err;
	nlen = be32_to_cpup(p++);

	p = xdr_inline_decode(streamp, nlen);
	if (unlikely(!p))
		goto out_err;

	/* Check that netid is "tcp" */
	if (nlen != 3 || memcmp((char *)p, "tcp", 3)) {
		dprintk("%s: ERROR: non ipv4 TCP r_netid\n", __func__);
		goto out_err;
	}

	/* r_addr */
	p = xdr_inline_decode(streamp, 4);
	if (unlikely(!p))
		goto out_err;
	rlen = be32_to_cpup(p);

	p = xdr_inline_decode(streamp, rlen);
	if (unlikely(!p))
		goto out_err;

	/* ipv6 length plus port is legal */
	if (rlen > INET6_ADDRSTRLEN + 8) {
		dprintk("%s: Invalid address, length %d\n", __func__,
			rlen);
		goto out_err;
	}
	buf = kmalloc(rlen + 1, gfp_flags);
	if (!buf) {
		dprintk("%s: Not enough memory\n", __func__);
		goto out_err;
	}
	buf[rlen] = '\0';
	memcpy(buf, p, rlen);

	/* replace the port dots with dashes for the in4_pton() delimiter */
	for (i = 0; i < 2; i++) {
		char *res = strrchr(buf, '.');
		if (!res) {
			dprintk("%s: Failed finding expected dots in port\n",
				__func__);
			goto out_free;
		}
		*res = '-';
	}

	/* Currently only support ipv4 address */
	if (in4_pton(buf, rlen, (u8 *)&ip_addr, '-', &ipend) == 0) {
		dprintk("%s: Only ipv4 addresses supported\n", __func__);
		goto out_free;
	}

	/* port */
	pstr = ipend;
	sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]);
	port = htons((tmp[0] << 8) | (tmp[1]));

	ds = nfs4_pnfs_ds_add(inode, ip_addr, port, gfp_flags);
	dprintk("%s: Decoded address and port %s\n", __func__, buf);
out_free:
	kfree(buf);
out_err:
	return ds;
}

/* Decode opaque device data and return the result */
static struct nfs4_file_layout_dsaddr*
decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
{
	int i;
	u32 cnt, num;
	u8 *indexp;
	__be32 *p;
	u8 *stripe_indices;
	u8 max_stripe_index;
	struct nfs4_file_layout_dsaddr *dsaddr = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf = {
		.pages = pdev->pages,
		.page_len = pdev->pglen,
		.buflen = pdev->pglen,
		.len = pdev->pglen,
	};
	struct page *scratch;

	/* set up xdr stream */
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		goto out_err;

	xdr_init_decode(&stream, &buf, NULL);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* Get the stripe count (number of stripe indices) */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_free_scratch;

	cnt = be32_to_cpup(p);
	dprintk("%s stripe count %d\n", __func__, cnt);
	if (cnt > NFS4_PNFS_MAX_STRIPE_CNT) {
		printk(KERN_WARNING "%s: stripe count %d greater than "
		       "supported maximum %d\n", __func__,
		       cnt, NFS4_PNFS_MAX_STRIPE_CNT);
		goto out_err_free_scratch;
	}

	/* read stripe indices */
	stripe_indices = kcalloc(cnt, sizeof(u8), gfp_flags);
	if (!stripe_indices)
		goto out_err_free_scratch;

	p = xdr_inline_decode(&stream, cnt << 2);
	if (unlikely(!p))
		goto out_err_free_stripe_indices;

	indexp = &stripe_indices[0];
	max_stripe_index = 0;
	for (i = 0; i < cnt; i++) {
		*indexp = be32_to_cpup(p++);
		max_stripe_index = max(max_stripe_index, *indexp);
		indexp++;
	}

	/* Check the multipath list count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_free_stripe_indices;

	num = be32_to_cpup(p);
	dprintk("%s ds_num %u\n", __func__, num);
	if (num > NFS4_PNFS_MAX_MULTI_CNT) {
		printk(KERN_WARNING "%s: multipath count %d greater than "
		       "supported maximum %d\n", __func__,
		       num, NFS4_PNFS_MAX_MULTI_CNT);
		goto out_err_free_stripe_indices;
	}

	/* validate stripe indices are all < num */
	if (max_stripe_index >= num) {
		printk(KERN_WARNING "%s: stripe index %u >= num ds %u\n",
		       __func__, max_stripe_index, num);
		goto out_err_free_stripe_indices;
	}

	dsaddr = kzalloc(sizeof(*dsaddr) +
			(sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
			gfp_flags);
	if (!dsaddr)
		goto out_err_free_stripe_indices;

	dsaddr->stripe_count = cnt;
	dsaddr->stripe_indices = stripe_indices;
	stripe_indices = NULL;
	dsaddr->ds_num = num;

	memcpy(&dsaddr->deviceid, &pdev->dev_id, sizeof(pdev->dev_id));
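	/*
	 * Read the multipath list for each data server. Only the first
	 * address of each list is used; any additional multipath entries
	 * are decoded and skipped.
	 */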
	for (i = 0; i < dsaddr->ds_num; i++) {
		int j;
		u32 mp_count;

		p = xdr_inline_decode(&stream, 4);
		if (unlikely(!p))
			goto out_err_free_deviceid;

		mp_count = be32_to_cpup(p); /* multipath count */
		if (mp_count > 1) {
			printk(KERN_WARNING
			       "%s: Multipath count %d not supported, "
			       "skipping all greater than 1\n", __func__,
			       mp_count);
		}
		for (j = 0; j < mp_count; j++) {
			if (j == 0) {
				dsaddr->ds_list[i] = decode_and_add_ds(&stream,
					ino, gfp_flags);
				if (dsaddr->ds_list[i] == NULL)
					goto out_err_free_deviceid;
			} else {
				u32 len;
				/* skip extra multipath */

				/* read len, skip */
				p = xdr_inline_decode(&stream, 4);
				if (unlikely(!p))
					goto out_err_free_deviceid;
				len = be32_to_cpup(p);

				p = xdr_inline_decode(&stream, len);
				if (unlikely(!p))
					goto out_err_free_deviceid;

				/* read len, skip */
				p = xdr_inline_decode(&stream, 4);
				if (unlikely(!p))
					goto out_err_free_deviceid;
				len = be32_to_cpup(p);

				p = xdr_inline_decode(&stream, len);
				if (unlikely(!p))
					goto out_err_free_deviceid;
			}
		}
	}

	__free_page(scratch);
	return dsaddr;

out_err_free_deviceid:
	nfs4_fl_free_deviceid(dsaddr);
	/* stripe_indices was part of dsaddr */
	goto out_err_free_scratch;
out_err_free_stripe_indices:
	kfree(stripe_indices);
out_err_free_scratch:
	__free_page(scratch);
out_err:
	dprintk("%s ERROR: returning NULL\n", __func__);
	return NULL;
}

/*
 * Decode the opaque device specified in 'dev' and add it to the cache of
 * available devices.
 */
static struct nfs4_file_layout_dsaddr *
decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags)
{
	struct nfs4_file_layout_dsaddr *d, *new;
	long hash;

	new = decode_device(inode, dev, gfp_flags);
	if (!new) {
		printk(KERN_WARNING "%s: Could not decode or add device\n",
			__func__);
		return NULL;
	}
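	/*
	 * decode_device() ran without the lock held, so another thread may
	 * have cached the same device ID in the meantime; if an existing
	 * entry is found, return it and free the freshly decoded copy.
	 */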
	spin_lock(&filelayout_deviceid_lock);
	d = nfs4_fl_find_get_deviceid(&new->deviceid);
	if (d) {
		spin_unlock(&filelayout_deviceid_lock);
		nfs4_fl_free_deviceid(new);
		return d;
	}

	INIT_HLIST_NODE(&new->node);
	atomic_set(&new->ref, 1);
	hash = nfs4_fl_deviceid_hash(&new->deviceid);
	hlist_add_head_rcu(&new->node, &filelayout_deviceid_cache[hash]);
	spin_unlock(&filelayout_deviceid_lock);

	return new;
}

/*
 * Retrieve the information for dev_id, add it to the list
 * of available devices, and return it.
 */
struct nfs4_file_layout_dsaddr *
get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags)
{
	struct pnfs_device *pdev = NULL;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	struct nfs4_file_layout_dsaddr *dsaddr = NULL;
	int rc, i;
	struct nfs_server *server = NFS_SERVER(inode);

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;
	dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
		__func__, inode, max_resp_sz, max_pages);

	pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags);
	if (pdev == NULL)
		return NULL;

	pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
	if (pages == NULL) {
		kfree(pdev);
		return NULL;
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_free;
	}

	memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
	pdev->layout_type = LAYOUT_NFSV4_1_FILES;
	pdev->pages = pages;
	pdev->pgbase = 0;
	pdev->pglen = PAGE_SIZE * max_pages;
	pdev->mincount = 0;

	rc = nfs4_proc_getdeviceinfo(server, pdev);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free;

	/*
	 * Found new device, need to decode it and then add it to the
	 * list of known devices for this mountpoint.
	 */
	dsaddr = decode_and_add_device(inode, pdev, gfp_flags);
out_free:
	for (i = 0; i < max_pages; i++) {
		/* may be reached before every page was allocated */
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
	kfree(pdev);
	dprintk("<-- %s dsaddr %p\n", __func__, dsaddr);
	return dsaddr;
}
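/*
 * Drop a reference on a device ID cache entry. The last reference removes
 * it from the hash under filelayout_deviceid_lock and, after an RCU grace
 * period, frees it.
 */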
void
nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
{
	if (atomic_dec_and_lock(&dsaddr->ref, &filelayout_deviceid_lock)) {
		hlist_del_rcu(&dsaddr->node);
		spin_unlock(&filelayout_deviceid_lock);

		synchronize_rcu();
		nfs4_fl_free_deviceid(dsaddr);
	}
}

/* RCU lookup; takes a reference unless the entry is already being freed */
struct nfs4_file_layout_dsaddr *
nfs4_fl_find_get_deviceid(struct nfs4_deviceid *id)
{
	struct nfs4_file_layout_dsaddr *d;
	struct hlist_node *n;
	long hash = nfs4_fl_deviceid_hash(id);

	rcu_read_lock();
	hlist_for_each_entry_rcu(d, n, &filelayout_deviceid_cache[hash], node) {
		if (!memcmp(&d->deviceid, id, sizeof(*id))) {
			if (!atomic_inc_not_zero(&d->ref))
				goto fail;
			rcu_read_unlock();
			return d;
		}
	}
fail:
	rcu_read_unlock();
	return NULL;
}

/*
 * Want res = (offset - layout->pattern_offset)/ layout->stripe_unit
 * Then: ((res + fsi) % dsaddr->stripe_count)
 */
u32
nfs4_fl_calc_j_index(struct pnfs_layout_segment *lseg, loff_t offset)
{
	struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
	u64 tmp;

	tmp = offset - flseg->pattern_offset;
	do_div(tmp, flseg->stripe_unit);
	tmp += flseg->first_stripe_index;
	return do_div(tmp, flseg->dsaddr->stripe_count);
}

u32
nfs4_fl_calc_ds_index(struct pnfs_layout_segment *lseg, u32 j)
{
	return FILELAYOUT_LSEG(lseg)->dsaddr->stripe_indices[j];
}

struct nfs_fh *
nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j)
{
	struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
	u32 i;

	if (flseg->stripe_type == STRIPE_SPARSE) {
		if (flseg->num_fh == 1)
			i = 0;
		else if (flseg->num_fh == 0)
			/* Use the MDS OPEN fh set in nfs_read_rpcsetup */
			return NULL;
		else
			i = nfs4_fl_calc_ds_index(lseg, j);
	} else
		i = j;
	return flseg->fh_array[i];
}

static void
filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr,
			       int err, u32 ds_addr)
{
	u32 *p = (u32 *)&dsaddr->deviceid;

	printk(KERN_ERR "NFS: data server %x connection error %d."
		" Deviceid [%x%x%x%x] marked out of use.\n",
		ds_addr, err, p[0], p[1], p[2], p[3]);

	spin_lock(&filelayout_deviceid_lock);
	dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY;
	spin_unlock(&filelayout_deviceid_lock);
}

/*
 * Return the data server at index ds_idx in the device's ds_list, connecting
 * to it on first use. A connection failure marks the whole device ID as a
 * negative entry so that subsequent calls do not retry the connection.
 */
struct nfs4_pnfs_ds *
nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
{
	struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
	struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];

	if (ds == NULL) {
		printk(KERN_ERR "%s: No data server for offset index %d\n",
			__func__, ds_idx);
		return NULL;
	}

	if (!ds->ds_clp) {
		struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
		int err;

		if (dsaddr->flags & NFS4_DEVICE_ID_NEG_ENTRY) {
			/* Already tried to connect, don't try again */
			dprintk("%s Deviceid marked out of use\n", __func__);
			return NULL;
		}
		err = nfs4_ds_connect(s, ds);
		if (err) {
			filelayout_mark_devid_negative(dsaddr, err,
						       ntohl(ds->ds_ip_addr));
			return NULL;
		}
	}
	return ds;
}