Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.11-rc4 2076 lines 58 kB view raw
/*
 * osd_initiator - Main body of the osd initiator library.
 *
 * Note: The file does not contain the advanced security functionality which
 * is only needed by the security_manager's initiators.
 *
 * Copyright (C) 2008 Panasas Inc.  All rights reserved.
 *
 * Authors:
 *   Boaz Harrosh <ooo@electrozaur.com>
 *   Benny Halevy <bhalevy@panasas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Panasas company nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include <scsi/osd_initiator.h>
#include <scsi/osd_sec.h>
#include <scsi/osd_attributes.h>
#include <scsi/osd_sense.h>

#include <scsi/scsi_device.h>
#include <scsi/scsi_request.h>

#include "osd_debug.h"

/* Fallback definition for compilers/configs that do not provide __unused */
#ifndef __unused
# define __unused __attribute__((unused))
#endif

/* Number of times the block layer may retry an OSD request */
enum { OSD_REQ_RETRIES = 1 };

MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
MODULE_DESCRIPTION("open-osd initiator library libosd.ko");
MODULE_LICENSE("GPL");

/*
 * Compile-time sanity checks: the wire-format CDB/capability structures
 * must have exactly the sizes mandated by the OSD protocol, i.e. the
 * structures were not padded by the compiler.
 */
static inline void build_test(void)
{
	/* structures were not packed */
	BUILD_BUG_ON(sizeof(struct osd_capability) != OSD_CAP_LEN);
	BUILD_BUG_ON(sizeof(struct osdv2_cdb) != OSD_TOTAL_CDB_LEN);
	BUILD_BUG_ON(sizeof(struct osdv1_cdb) != OSDv1_TOTAL_CDB_LEN);
}

/* Human-readable protocol-version tag for log messages */
static const char *_osd_ver_desc(struct osd_request *or)
{
	return osd_req_is_ver1(or) ? "OSD1" : "OSD2";
}
/* Shorthand for an attribute in the Root Information page */
#define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len)

/*
 * Issue a GET ATTRIBUTES on the root object, log the device's identity
 * attributes, and fill @odi with the OSD name and system-id used later for
 * device identification.  Also serves as the probe used by
 * osd_auto_detect_ver() below: a failure here may simply mean the target
 * speaks the other protocol version.
 *
 * Returns 0 on success or a negative errno.
 */
static int _osd_get_print_system_info(struct osd_dev *od,
	void *caps, struct osd_dev_info *odi)
{
	struct osd_request *or;
	struct osd_attr get_attrs[] = {
		ATTR_DEF_RI(OSD_ATTR_RI_VENDOR_IDENTIFICATION, 8),
		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_IDENTIFICATION, 16),
		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_MODEL, 32),
		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_REVISION_LEVEL, 4),
		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER, 64 /*variable*/),
		ATTR_DEF_RI(OSD_ATTR_RI_OSD_NAME, 64 /*variable*/),
		ATTR_DEF_RI(OSD_ATTR_RI_TOTAL_CAPACITY, 8),
		ATTR_DEF_RI(OSD_ATTR_RI_USED_CAPACITY, 8),
		ATTR_DEF_RI(OSD_ATTR_RI_NUMBER_OF_PARTITIONS, 8),
		ATTR_DEF_RI(OSD_ATTR_RI_CLOCK, 6),
		/* IBM-OSD-SIM Has a bug with this one put it last */
		ATTR_DEF_RI(OSD_ATTR_RI_OSD_SYSTEM_ID, 20),
	};
	void *iter = NULL, *pFirst;
	int nelem = ARRAY_SIZE(get_attrs), a = 0;
	int ret;

	or = osd_start_request(od, GFP_KERNEL);
	if (!or)
		return -ENOMEM;

	/* get attrs */
	osd_req_get_attributes(or, &osd_root_object);
	osd_req_add_get_attr_list(or, get_attrs, ARRAY_SIZE(get_attrs));

	ret = osd_finalize_request(or, 0, caps, NULL);
	if (ret)
		goto out;

	ret = osd_execute_request(or);
	if (ret) {
		OSD_ERR("Failed to detect %s => %d\n", _osd_ver_desc(or), ret);
		goto out;
	}

	/* nelem is updated to the number of attributes actually returned */
	osd_req_decode_get_attr_list(or, get_attrs, &nelem, &iter);

	OSD_INFO("Detected %s device\n",
		_osd_ver_desc(or));

	/* The decode fills get_attrs[] in the order they were requested */
	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("VENDOR_IDENTIFICATION [%s]\n",
		(char *)pFirst);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
		(char *)pFirst);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("PRODUCT_MODEL [%s]\n",
		(char *)pFirst);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
		pFirst ? get_unaligned_be32(pFirst) : ~0U);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n",
		(char *)pFirst);

	odi->osdname_len = get_attrs[a].len;
	/* Avoid NULL for memcmp optimization 0-length is good enough */
	odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
	if (!odi->osdname) {
		ret = -ENOMEM;
		goto out;
	}
	if (odi->osdname_len)
		memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
	OSD_INFO("OSD_NAME [%s]\n", odi->osdname);
	a++;

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("TOTAL_CAPACITY [0x%llx]\n",
		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("USED_CAPACITY [0x%llx]\n",
		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);

	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n",
		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);

	/* Target may legitimately return fewer attributes than requested */
	if (a >= nelem)
		goto out;

	/* FIXME: Where are the time utilities */
	pFirst = get_attrs[a++].val_ptr;
	OSD_INFO("CLOCK [0x%6phN]\n", pFirst);

	if (a < nelem) { /* IBM-OSD-SIM bug, Might not have it */
		unsigned len = get_attrs[a].len;
		char sid_dump[32*4 + 2]; /* 2nibbles+space+ASCII */

		hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
				   sid_dump, sizeof(sid_dump), true);
		OSD_INFO("OSD_SYSTEM_ID(%d)\n"
			 "        [%s]\n", len, sid_dump);

		if (unlikely(len > sizeof(odi->systemid))) {
			OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). "
				"device identification might not work\n", len);
			len = sizeof(odi->systemid);
		}
		odi->systemid_len = len;
		memcpy(odi->systemid, get_attrs[a].val_ptr, len);
		a++;
	}
out:
	osd_end_request(or);
	return ret;
}

/*
 * Detect which OSD protocol version (OSD2 first, then OSD1) the target
 * speaks, by probing with _osd_get_print_system_info().  On success the
 * osd_dev's version is set accordingly and @odi is populated.
 */
int osd_auto_detect_ver(struct osd_dev *od,
	void *caps, struct osd_dev_info *odi)
{
	int ret;

	/* Auto-detect the osd version */
	ret = _osd_get_print_system_info(od, caps, odi);
	if (ret) {
		/* OSD2 probe failed; fall back and retry as OSD1 */
		osd_dev_set_ver(od, OSD_VER1);
		OSD_DEBUG("converting to OSD1\n");
		ret = _osd_get_print_system_info(od, caps, odi);
	}

	return ret;
}
EXPORT_SYMBOL(osd_auto_detect_ver);

/* Total CDB length for the request's protocol version */
static unsigned _osd_req_cdb_len(struct osd_request *or)
{
	return osd_req_is_ver1(or) ? OSDv1_TOTAL_CDB_LEN : OSD_TOTAL_CDB_LEN;
}

/* On-the-wire size of one attribute-list element carrying @len value bytes */
static unsigned _osd_req_alist_elem_size(struct osd_request *or, unsigned len)
{
	return osd_req_is_ver1(or) ?
		osdv1_attr_list_elem_size(len) :
		osdv2_attr_list_elem_size(len);
}

/*
 * Serialize one attribute @oa into the list buffer at @attr_last, using the
 * element layout of the request's protocol version (big-endian fields).
 */
static void _osd_req_alist_elem_encode(struct osd_request *or,
	void *attr_last, const struct osd_attr *oa)
{
	if (osd_req_is_ver1(or)) {
		struct osdv1_attributes_list_element *attr = attr_last;

		attr->attr_page = cpu_to_be32(oa->attr_page);
		attr->attr_id = cpu_to_be32(oa->attr_id);
		attr->attr_bytes = cpu_to_be16(oa->len);
		memcpy(attr->attr_val, oa->val_ptr, oa->len);
	} else {
		struct osdv2_attributes_list_element *attr = attr_last;

		attr->attr_page = cpu_to_be32(oa->attr_page);
		attr->attr_id = cpu_to_be32(oa->attr_id);
		attr->attr_bytes = cpu_to_be16(oa->len);
		memcpy(attr->attr_val, oa->val_ptr, oa->len);
	}
}
/*
 * Decode one attribute-list element at @cur_p into @oa.
 *
 * @max_bytes is how many bytes remain in the received list buffer; the
 * element header and its declared value length are both bounds-checked
 * against it.
 *
 * Returns the total on-the-wire size of the consumed element, or -1 if the
 * buffer is exhausted / the element overruns it.
 */
static int _osd_req_alist_elem_decode(struct osd_request *or,
	void *cur_p, struct osd_attr *oa, unsigned max_bytes)
{
	unsigned inc;
	if (osd_req_is_ver1(or)) {
		struct osdv1_attributes_list_element *attr = cur_p;

		if (max_bytes < sizeof(*attr))
			return -1;

		oa->len = be16_to_cpu(attr->attr_bytes);
		inc = _osd_req_alist_elem_size(or, oa->len);
		if (inc > max_bytes)
			return -1;

		oa->attr_page = be32_to_cpu(attr->attr_page);
		oa->attr_id = be32_to_cpu(attr->attr_id);

		/* OSD1: On empty attributes we return a pointer to 2 bytes
		 * of zeros. This keeps similar behaviour with OSD2.
		 * (See below)
		 */
		oa->val_ptr = likely(oa->len) ? attr->attr_val :
						(u8 *)&attr->attr_bytes;
	} else {
		struct osdv2_attributes_list_element *attr = cur_p;

		if (max_bytes < sizeof(*attr))
			return -1;

		oa->len = be16_to_cpu(attr->attr_bytes);
		inc = _osd_req_alist_elem_size(or, oa->len);
		if (inc > max_bytes)
			return -1;

		oa->attr_page = be32_to_cpu(attr->attr_page);
		oa->attr_id = be32_to_cpu(attr->attr_id);

		/* OSD2: For convenience, on empty attributes, we return 8 bytes
		 * of zeros here. This keeps the same behaviour with OSD2r04,
		 * and is nice with null terminating ASCII fields.
		 * oa->val_ptr == NULL marks the end-of-list, or error.
		 */
		oa->val_ptr = likely(oa->len) ? attr->attr_val : attr->reserved;
	}
	return inc;
}

/* Total byte count declared in a received attribute list's header */
static unsigned _osd_req_alist_size(struct osd_request *or, void *list_head)
{
	return osd_req_is_ver1(or) ?
		osdv1_list_size(list_head) :
		osdv2_list_size(list_head);
}

/* Size of the attribute-list header for the request's protocol version */
static unsigned _osd_req_sizeof_alist_header(struct osd_request *or)
{
	return osd_req_is_ver1(or) ?
		sizeof(struct osdv1_attributes_list_header) :
		sizeof(struct osdv2_attributes_list_header);
}

/* Initialize an attribute-list header in @list to @list_type (zeros rest) */
static void _osd_req_set_alist_type(struct osd_request *or,
	void *list, int list_type)
{
	if (osd_req_is_ver1(or)) {
		struct osdv1_attributes_list_header *attr_list = list;

		memset(attr_list, 0, sizeof(*attr_list));
		attr_list->type = list_type;
	} else {
		struct osdv2_attributes_list_header *attr_list = list;

		memset(attr_list, 0, sizeof(*attr_list));
		attr_list->type = list_type;
	}
}

/* Check whether @list (possibly NULL) carries an alist header of @list_type */
static bool _osd_req_is_alist_type(struct osd_request *or,
	void *list, int list_type)
{
	if (!list)
		return false;

	if (osd_req_is_ver1(or)) {
		struct osdv1_attributes_list_header *attr_list = list;

		return attr_list->type == list_type;
	} else {
		struct osdv2_attributes_list_header *attr_list = list;

		return attr_list->type == list_type;
	}
}

/* This is for List-objects not Attributes-Lists */
static void _osd_req_encode_olist(struct osd_request *or,
	struct osd_obj_id_list *list)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);

	if (osd_req_is_ver1(or)) {
		cdbh->v1.list_identifier = list->list_identifier;
		cdbh->v1.start_address = list->continuation_id;
	} else {
		cdbh->v2.list_identifier = list->list_identifier;
		cdbh->v2.start_address = list->continuation_id;
	}
}
/*
 * Encode a byte offset into the compressed osd_cdb_offset wire format.
 * @padding receives the number of pad bytes the caller must insert so the
 * data starts on the encoded boundary.
 */
static osd_cdb_offset osd_req_encode_offset(struct osd_request *or,
	u64 offset, unsigned *padding)
{
	return __osd_encode_offset(offset, padding,
			osd_req_is_ver1(or) ?
				OSDv1_OFFSET_MIN_SHIFT : OSD_OFFSET_MIN_SHIFT,
			OSD_OFFSET_MAX_SHIFT);
}

/* Security-parameters area inside the CDB, per protocol version */
static struct osd_security_parameters *
_osd_req_sec_params(struct osd_request *or)
{
	struct osd_cdb *ocdb = &or->cdb;

	if (osd_req_is_ver1(or))
		return (struct osd_security_parameters *)&ocdb->v1.sec_params;
	else
		return (struct osd_security_parameters *)&ocdb->v2.sec_params;
}

/*
 * Initialize an osd_dev bound to @scsi_device.  Defaults to OSD_VER2 when
 * OSD1 support is compiled in; osd_auto_detect_ver() may downgrade later.
 */
void osd_dev_init(struct osd_dev *osdd, struct scsi_device *scsi_device)
{
	memset(osdd, 0, sizeof(*osdd));
	osdd->scsi_device = scsi_device;
	osdd->def_timeout = BLK_DEFAULT_SG_TIMEOUT;
#ifdef OSD_VER1_SUPPORT
	osdd->version = OSD_VER2;
#endif
	/* TODO: Allocate pools for osd_request attributes ... */
}
EXPORT_SYMBOL(osd_dev_init);

/* Counterpart of osd_dev_init(); drops the scsi_device reference pointer */
void osd_dev_fini(struct osd_dev *osdd)
{
	/* TODO: De-allocate pools */

	osdd->scsi_device = NULL;
}
EXPORT_SYMBOL(osd_dev_fini);

/* Allocate a zeroed osd_request (future: take from a mempool) */
static struct osd_request *_osd_request_alloc(gfp_t gfp)
{
	struct osd_request *or;

	/* TODO: Use mempool with one saved request */
	or = kzalloc(sizeof(*or), gfp);
	return or;
}

static void _osd_request_free(struct osd_request *or)
{
	kfree(or);
}

/*
 * Begin a new OSD request on @dev.  Returns NULL on allocation failure.
 * The caller must eventually release it with osd_end_request().
 */
struct osd_request *osd_start_request(struct osd_dev *dev, gfp_t gfp)
{
	struct osd_request *or;

	or = _osd_request_alloc(gfp);
	if (!or)
		return NULL;

	or->osd_dev = dev;
	or->alloc_flags = gfp;
	or->timeout = dev->def_timeout;
	or->retries = OSD_REQ_RETRIES;

	return or;
}
EXPORT_SYMBOL(osd_start_request);

/* Free one internal data segment's buffer and reset its bookkeeping */
static void _osd_free_seg(struct osd_request *or __unused,
	struct _osd_req_data_segment *seg)
{
	if (!seg->buff || !seg->alloc_size)
		return;

	kfree(seg->buff);
	seg->buff = NULL;
	seg->alloc_size = 0;
}

static void _put_request(struct request *rq)
{
	/*
	 * If osd_finalize_request() was called but the request was not
	 * executed through the block layer, then we must release BIOs.
	 * TODO: Keep error code in or->async_error. Need to audit all
	 *       code paths.
	 */
	if (unlikely(rq->bio))
		blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
	else
		blk_put_request(rq);
}

/*
 * Release everything attached to @or: block-layer request(s) (including a
 * bidi next_rq), all internal data segments, and the request itself.
 */
void osd_end_request(struct osd_request *or)
{
	struct request *rq = or->request;

	if (rq) {
		if (rq->next_rq) {
			_put_request(rq->next_rq);
			rq->next_rq = NULL;
		}

		_put_request(rq);
	}

	_osd_free_seg(or, &or->get_attr);
	_osd_free_seg(or, &or->enc_get_attr);
	_osd_free_seg(or, &or->set_attr);
	_osd_free_seg(or, &or->cdb_cont);

	_osd_request_free(or);
}
EXPORT_SYMBOL(osd_end_request);

/*
 * Record completion status on @or: error codes, sense data, and the
 * residual byte counts of the out (write) and in (read) sides.
 */
static void _set_error_resid(struct osd_request *or, struct request *req,
			     int error)
{
	or->async_error = error;
	or->req_errors = req->errors ? : error;
	or->sense_len = scsi_req(req)->sense_len;
	if (or->sense_len)
		memcpy(or->sense, scsi_req(req)->sense, or->sense_len);
	if (or->out.req)
		or->out.residual = scsi_req(or->out.req)->resid_len;
	if (or->in.req)
		or->in.residual = scsi_req(or->in.req)->resid_len;
}

/* Execute @or synchronously through the block layer */
int osd_execute_request(struct osd_request *or)
{
	int error = blk_execute_rq(or->request->q, NULL, or->request, 0);

	_set_error_resid(or, or->request, error);
	return error;
}
EXPORT_SYMBOL(osd_execute_request);

/*
 * Block-layer completion callback for async execution: saves status,
 * releases the request(s), then invokes the user's async_done callback
 * (or tears the whole osd_request down if none was registered).
 */
static void osd_request_async_done(struct request *req, int error)
{
	struct osd_request *or = req->end_io_data;

	_set_error_resid(or, req, error);
	if (req->next_rq) {
		__blk_put_request(req->q, req->next_rq);
		req->next_rq = NULL;
	}

	__blk_put_request(req->q, req);
	or->request = NULL;
	or->in.req = NULL;
	or->out.req = NULL;

	if (or->async_done)
		or->async_done(or, or->async_private);
	else
		osd_end_request(or);
}
/*
 * Execute @or asynchronously; @done(@or, @private) is called on completion.
 * Always returns 0 (submission itself cannot fail here).
 */
int osd_execute_request_async(struct osd_request *or,
	osd_req_done_fn *done, void *private)
{
	or->request->end_io_data = or;
	or->async_private = private;
	or->async_done = done;

	blk_execute_rq_nowait(or->request->q, NULL, or->request, 0,
			      osd_request_async_done);
	return 0;
}
EXPORT_SYMBOL(osd_execute_request_async);

/* Shared zero padding used to align appended data segments */
u8 sg_out_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
u8 sg_in_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];

/*
 * Grow @seg's buffer to at least @max_bytes (no-op if already big enough).
 * Newly grown space is zeroed.  Returns 0 or -ENOMEM.
 */
static int _osd_realloc_seg(struct osd_request *or,
	struct _osd_req_data_segment *seg, unsigned max_bytes)
{
	void *buff;

	if (seg->alloc_size >= max_bytes)
		return 0;

	buff = krealloc(seg->buff, max_bytes, or->alloc_flags);
	if (!buff) {
		OSD_ERR("Failed to Realloc %d-bytes was-%d\n", max_bytes,
			seg->alloc_size);
		return -ENOMEM;
	}

	memset(buff + seg->alloc_size, 0, max_bytes - seg->alloc_size);
	seg->buff = buff;
	seg->alloc_size = max_bytes;
	return 0;
}

/* Ensure the CDB-continuation segment can hold @total_bytes */
static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
{
	OSD_DEBUG("total_bytes=%d\n", total_bytes);
	return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
}

/*
 * Ensure the set-attr segment can hold @nelem encoded elements from @oa
 * plus @add_bytes of already-committed data.
 */
static int _alloc_set_attr_list(struct osd_request *or,
	const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
{
	unsigned total_bytes = add_bytes;

	for (; nelem; --nelem, ++oa)
		total_bytes += _osd_req_alist_elem_size(or, oa->len);

	OSD_DEBUG("total_bytes=%d\n", total_bytes);
	return _osd_realloc_seg(or, &or->set_attr, total_bytes);
}

/* Ensure the encoded get-attr descriptor segment can hold @max_bytes */
static int _alloc_get_attr_desc(struct osd_request *or, unsigned max_bytes)
{
	OSD_DEBUG("total_bytes=%d\n", max_bytes);
	return _osd_realloc_seg(or, &or->enc_get_attr, max_bytes);
}

/* Ensure the get-attr reply segment matches its recorded total_bytes */
static int _alloc_get_attr_list(struct osd_request *or)
{
	OSD_DEBUG("total_bytes=%d\n", or->get_attr.total_bytes);
	return _osd_realloc_seg(or, &or->get_attr, or->get_attr.total_bytes);
}

/*
 * Common to all OSD commands
 */

/* Fill the version-1 CDB header: action, object address, length, offset */
static void _osdv1_req_encode_common(struct osd_request *or,
	__be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
{
	struct osdv1_cdb *ocdb = &or->cdb.v1;

	/*
	 * For speed, the commands
	 *	OSD_ACT_PERFORM_SCSI_COMMAND	, V1 0x8F7E, V2 0x8F7C
	 *	OSD_ACT_SCSI_TASK_MANAGEMENT	, V1 0x8F7F, V2 0x8F7D
	 * are not supported here. Should pass zero and set after the call
	 */
	act &= cpu_to_be16(~0x0080); /* V1 action code */

	OSD_DEBUG("OSDv1 execute opcode 0x%x\n", be16_to_cpu(act));

	ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
	ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
	ocdb->h.varlen_cdb.service_action = act;

	ocdb->h.partition = cpu_to_be64(obj->partition);
	ocdb->h.object = cpu_to_be64(obj->id);
	ocdb->h.v1.length = cpu_to_be64(len);
	ocdb->h.v1.start_address = cpu_to_be64(offset);
}

/* Fill the version-2 CDB header: action, object address, length, offset */
static void _osdv2_req_encode_common(struct osd_request *or,
	 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
{
	struct osdv2_cdb *ocdb = &or->cdb.v2;

	OSD_DEBUG("OSDv2 execute opcode 0x%x\n", be16_to_cpu(act));

	ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
	ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
	ocdb->h.varlen_cdb.service_action = act;

	ocdb->h.partition = cpu_to_be64(obj->partition);
	ocdb->h.object = cpu_to_be64(obj->id);
	ocdb->h.v2.length = cpu_to_be64(len);
	ocdb->h.v2.start_address = cpu_to_be64(offset);
}

/* Dispatch CDB-header encoding to the request's protocol version */
static void _osd_req_encode_common(struct osd_request *or,
	__be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
{
	if (osd_req_is_ver1(or))
		_osdv1_req_encode_common(or, act, obj, offset, len);
	else
		_osdv2_req_encode_common(or, act, obj, offset, len);
}
/*
 * Device commands
 */
/*TODO: void osd_req_set_master_seed_xchg(struct osd_request *, ...); */
/*TODO: void osd_req_set_master_key(struct osd_request *, ...); */

/* Encode a FORMAT OSD command that formats the device to @tot_capacity */
void osd_req_format(struct osd_request *or, u64 tot_capacity)
{
	_osd_req_encode_common(or, OSD_ACT_FORMAT_OSD, &osd_root_object, 0,
			       tot_capacity);
}
EXPORT_SYMBOL(osd_req_format);

/* List partitions on the device: partition 0 means "the device itself" */
int osd_req_list_dev_partitions(struct osd_request *or,
	osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem)
{
	return osd_req_list_partition_objects(or, 0, initial_id, list, nelem);
}
EXPORT_SYMBOL(osd_req_list_dev_partitions);

/* Store the flush-scope option byte in the already-encoded CDB */
static void _osd_req_encode_flush(struct osd_request *or,
	enum osd_options_flush_scope_values op)
{
	struct osd_cdb_head *ocdb = osd_cdb_head(&or->cdb);

	ocdb->command_specific_options = op;
}

/* Encode a FLUSH OSD (whole device) command with scope @op */
void osd_req_flush_obsd(struct osd_request *or,
	enum osd_options_flush_scope_values op)
{
	_osd_req_encode_common(or, OSD_ACT_FLUSH_OSD, &osd_root_object, 0, 0);
	_osd_req_encode_flush(or, op);
}
EXPORT_SYMBOL(osd_req_flush_obsd);

/*TODO: void osd_req_perform_scsi_command(struct osd_request *,
	const u8 *cdb, ...); */
/*TODO: void osd_req_task_management(struct osd_request *, ...); */

/*
 * Partition commands
 */
/* Encode @act addressed to partition @partition (object id 0) */
static void _osd_req_encode_partition(struct osd_request *or,
	__be16 act, osd_id partition)
{
	struct osd_obj_id par = {
		.partition = partition,
		.id = 0,
	};

	_osd_req_encode_common(or, act, &par, 0, 0);
}

void osd_req_create_partition(struct osd_request *or, osd_id partition)
{
	_osd_req_encode_partition(or, OSD_ACT_CREATE_PARTITION, partition);
}
EXPORT_SYMBOL(osd_req_create_partition);

void osd_req_remove_partition(struct osd_request *or, osd_id partition)
{
	_osd_req_encode_partition(or, OSD_ACT_REMOVE_PARTITION, partition);
}
EXPORT_SYMBOL(osd_req_remove_partition);

/*TODO: void osd_req_set_partition_key(struct osd_request *,
	osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE],
	u8 seed[OSD_CRYPTO_SEED_SIZE]); */

/*
 * Encode a LIST-type command (@action) on @obj starting at @initial_id and
 * map @list as the read buffer that will receive the reply.
 * Returns 0 or a negative errno from bio_map_kern().
 */
static int _osd_req_list_objects(struct osd_request *or,
	__be16 action, const struct osd_obj_id *obj, osd_id initial_id,
	struct osd_obj_id_list *list, unsigned nelem)
{
	struct request_queue *q = osd_request_queue(or->osd_dev);
	u64 len = nelem * sizeof(osd_id) + sizeof(*list);
	struct bio *bio;

	_osd_req_encode_common(or, action, obj, (u64)initial_id, len);

	/* Non-zero list_identifier means this continues a previous LIST */
	if (list->list_identifier)
		_osd_req_encode_olist(or, list);

	WARN_ON(or->in.bio);
	bio = bio_map_kern(q, list, len, or->alloc_flags);
	if (IS_ERR(bio)) {
		OSD_ERR("!!! Failed to allocate list_objects BIO\n");
		return PTR_ERR(bio);
	}

	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	or->in.bio = bio;
	or->in.total_bytes = bio->bi_iter.bi_size;
	return 0;
}

/* List the collections within partition @partition */
int osd_req_list_partition_collections(struct osd_request *or,
	osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
	unsigned nelem)
{
	struct osd_obj_id par = {
		.partition = partition,
		.id = 0,
	};

	return osd_req_list_collection_objects(or, &par, initial_id, list,
					       nelem);
}
EXPORT_SYMBOL(osd_req_list_partition_collections);

/* List the objects within partition @partition */
int osd_req_list_partition_objects(struct osd_request *or,
	osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
	unsigned nelem)
{
	struct osd_obj_id par = {
		.partition = partition,
		.id = 0,
	};

	return _osd_req_list_objects(or, OSD_ACT_LIST, &par, initial_id, list,
				     nelem);
}
EXPORT_SYMBOL(osd_req_list_partition_objects);

/* Encode a FLUSH PARTITION command with scope @op */
void osd_req_flush_partition(struct osd_request *or,
	osd_id partition, enum osd_options_flush_scope_values op)
{
	_osd_req_encode_partition(or, OSD_ACT_FLUSH_PARTITION, partition);
	_osd_req_encode_flush(or, op);
}
EXPORT_SYMBOL(osd_req_flush_partition);

/*
 * Collection commands
 */
/*TODO: void osd_req_create_collection(struct osd_request *,
	const struct osd_obj_id *); */
osd_req_remove_collection(struct osd_request *, 780 const struct osd_obj_id *); */ 781 782int osd_req_list_collection_objects(struct osd_request *or, 783 const struct osd_obj_id *obj, osd_id initial_id, 784 struct osd_obj_id_list *list, unsigned nelem) 785{ 786 return _osd_req_list_objects(or, OSD_ACT_LIST_COLLECTION, obj, 787 initial_id, list, nelem); 788} 789EXPORT_SYMBOL(osd_req_list_collection_objects); 790 791/*TODO: void query(struct osd_request *, ...); V2 */ 792 793void osd_req_flush_collection(struct osd_request *or, 794 const struct osd_obj_id *obj, enum osd_options_flush_scope_values op) 795{ 796 _osd_req_encode_common(or, OSD_ACT_FLUSH_PARTITION, obj, 0, 0); 797 _osd_req_encode_flush(or, op); 798} 799EXPORT_SYMBOL(osd_req_flush_collection); 800 801/*TODO: void get_member_attrs(struct osd_request *, ...); V2 */ 802/*TODO: void set_member_attrs(struct osd_request *, ...); V2 */ 803 804/* 805 * Object commands 806 */ 807void osd_req_create_object(struct osd_request *or, struct osd_obj_id *obj) 808{ 809 _osd_req_encode_common(or, OSD_ACT_CREATE, obj, 0, 0); 810} 811EXPORT_SYMBOL(osd_req_create_object); 812 813void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *obj) 814{ 815 _osd_req_encode_common(or, OSD_ACT_REMOVE, obj, 0, 0); 816} 817EXPORT_SYMBOL(osd_req_remove_object); 818 819 820/*TODO: void osd_req_create_multi(struct osd_request *or, 821 struct osd_obj_id *first, struct osd_obj_id_list *list, unsigned nelem); 822*/ 823 824void osd_req_write(struct osd_request *or, 825 const struct osd_obj_id *obj, u64 offset, 826 struct bio *bio, u64 len) 827{ 828 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); 829 WARN_ON(or->out.bio || or->out.total_bytes); 830 WARN_ON(!op_is_write(bio_op(bio))); 831 or->out.bio = bio; 832 or->out.total_bytes = len; 833} 834EXPORT_SYMBOL(osd_req_write); 835 836int osd_req_write_kern(struct osd_request *or, 837 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) 838{ 839 struct request_queue 
*req_q = osd_request_queue(or->osd_dev); 840 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); 841 842 if (IS_ERR(bio)) 843 return PTR_ERR(bio); 844 845 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 846 osd_req_write(or, obj, offset, bio, len); 847 return 0; 848} 849EXPORT_SYMBOL(osd_req_write_kern); 850 851/*TODO: void osd_req_append(struct osd_request *, 852 const struct osd_obj_id *, struct bio *data_out); */ 853/*TODO: void osd_req_create_write(struct osd_request *, 854 const struct osd_obj_id *, struct bio *data_out, u64 offset); */ 855/*TODO: void osd_req_clear(struct osd_request *, 856 const struct osd_obj_id *, u64 offset, u64 len); */ 857/*TODO: void osd_req_punch(struct osd_request *, 858 const struct osd_obj_id *, u64 offset, u64 len); V2 */ 859 860void osd_req_flush_object(struct osd_request *or, 861 const struct osd_obj_id *obj, enum osd_options_flush_scope_values op, 862 /*V2*/ u64 offset, /*V2*/ u64 len) 863{ 864 if (unlikely(osd_req_is_ver1(or) && (offset || len))) { 865 OSD_DEBUG("OSD Ver1 flush on specific range ignored\n"); 866 offset = 0; 867 len = 0; 868 } 869 870 _osd_req_encode_common(or, OSD_ACT_FLUSH, obj, offset, len); 871 _osd_req_encode_flush(or, op); 872} 873EXPORT_SYMBOL(osd_req_flush_object); 874 875void osd_req_read(struct osd_request *or, 876 const struct osd_obj_id *obj, u64 offset, 877 struct bio *bio, u64 len) 878{ 879 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); 880 WARN_ON(or->in.bio || or->in.total_bytes); 881 WARN_ON(op_is_write(bio_op(bio))); 882 or->in.bio = bio; 883 or->in.total_bytes = len; 884} 885EXPORT_SYMBOL(osd_req_read); 886 887int osd_req_read_kern(struct osd_request *or, 888 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) 889{ 890 struct request_queue *req_q = osd_request_queue(or->osd_dev); 891 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); 892 893 if (IS_ERR(bio)) 894 return PTR_ERR(bio); 895 896 osd_req_read(or, obj, offset, bio, len); 897 return 0; 898} 
EXPORT_SYMBOL(osd_req_read_kern);

/*
 * Append a scatter-gather continuation descriptor covering @sglist
 * (@numentries extents) to the CDB-continuation segment.  On success *@len
 * holds the total byte count of all extents.  Returns 0 or -ENOMEM.
 */
static int _add_sg_continuation_descriptor(struct osd_request *or,
	const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
{
	struct osd_sg_continuation_descriptor *oscd;
	u32 oscd_size;
	unsigned i;
	int ret;

	oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);

	if (!or->cdb_cont.total_bytes) {
		/* First time, jump over the header, we will write to:
		 *	cdb_cont.buff + cdb_cont.total_bytes
		 */
		or->cdb_cont.total_bytes =
				sizeof(struct osd_continuation_segment_header);
	}

	ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
	if (unlikely(ret))
		return ret;

	oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
	oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
	oscd->hdr.pad_length = 0;
	oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));

	*len = 0;
	/* copy the sg entries and convert to network byte order */
	for (i = 0; i < numentries; i++) {
		oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
		oscd->entries[i].len    = cpu_to_be64(sglist[i].len);
		*len += sglist[i].len;
	}

	or->cdb_cont.total_bytes += oscd_size;
	OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
		  or->cdb_cont.total_bytes, oscd_size, numentries);
	return 0;
}

/*
 * Finalize the CDB-continuation segment (if any was built): fill its
 * header, sign it, record its length in the CDB, and link its bio at the
 * head of the data-out chain.  Returns 0 or a negative errno.
 */
static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
{
	struct request_queue *req_q = osd_request_queue(or->osd_dev);
	struct bio *bio;
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
	struct osd_continuation_segment_header *cont_seg_hdr;

	if (!or->cdb_cont.total_bytes)
		return 0;

	cont_seg_hdr = or->cdb_cont.buff;
	cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
	cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;

	/* create a bio for continuation segment */
	bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
			   GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	/* integrity check the continuation before the bio is linked
	 * with the other data segments since the continuation
	 * integrity is separate from the other data segments.
	 */
	osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);

	cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);

	/* we can't use _req_append_segment, because we need to link in the
	 * continuation bio to the head of the bio list - the
	 * continuation segment (if it exists) is always the first segment in
	 * the out data buffer.
	 */
	bio->bi_next = or->out.bio;
	or->out.bio = bio;
	or->out.total_bytes += or->cdb_cont.total_bytes;

	return 0;
}

/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
 * @sglist that has the scatter gather entries. Scatter-gather enables a write
 * of multiple none-contiguous areas of an object, in a single call. The extents
 * may overlap and/or be in any order. The only constrain is that:
 *	total_bytes(sglist) >= total_bytes(bio)
 */
int osd_req_write_sg(struct osd_request *or,
	const struct osd_obj_id *obj, struct bio *bio,
	const struct osd_sg_entry *sglist, unsigned numentries)
{
	u64 len;
	int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);

	if (ret)
		return ret;
	osd_req_write(or, obj, 0, bio, len);

	return 0;
}
EXPORT_SYMBOL(osd_req_write_sg);

/* osd_req_read_sg: Read multiple extents of an object into @bio
 * See osd_req_write_sg
 */
int osd_req_read_sg(struct osd_request *or,
	const struct osd_obj_id *obj, struct bio *bio,
	const struct osd_sg_entry *sglist, unsigned numentries)
{
	u64 len;
	u64 off;
	int ret;

	if (numentries > 1) {
		off = 0;
		ret = _add_sg_continuation_descriptor(or, sglist, numentries,
						      &len);
		if (ret)
			return ret;
	} else {
		/* Optimize the case of single segment, read_sg is a
		 * bidi operation.
		 */
		len = sglist->len;
		off = sglist->offset;
	}
	osd_req_read(or, obj, off, bio, len);

	return 0;
}
EXPORT_SYMBOL(osd_req_read_sg);
1042 */ 1043static struct bio *_create_sg_bios(struct osd_request *or, 1044 void **buff, const struct osd_sg_entry *sglist, unsigned numentries) 1045{ 1046 struct request_queue *q = osd_request_queue(or->osd_dev); 1047 struct bio *bio; 1048 unsigned i; 1049 1050 bio = bio_kmalloc(GFP_KERNEL, numentries); 1051 if (unlikely(!bio)) { 1052 OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries); 1053 return ERR_PTR(-ENOMEM); 1054 } 1055 1056 for (i = 0; i < numentries; i++) { 1057 unsigned offset = offset_in_page(buff[i]); 1058 struct page *page = virt_to_page(buff[i]); 1059 unsigned len = sglist[i].len; 1060 unsigned added_len; 1061 1062 BUG_ON(offset + len > PAGE_SIZE); 1063 added_len = bio_add_pc_page(q, bio, page, len, offset); 1064 if (unlikely(len != added_len)) { 1065 OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n", 1066 len, added_len); 1067 bio_put(bio); 1068 return ERR_PTR(-ENOMEM); 1069 } 1070 } 1071 1072 return bio; 1073} 1074 1075int osd_req_write_sg_kern(struct osd_request *or, 1076 const struct osd_obj_id *obj, void **buff, 1077 const struct osd_sg_entry *sglist, unsigned numentries) 1078{ 1079 struct bio *bio = _create_sg_bios(or, buff, sglist, numentries); 1080 if (IS_ERR(bio)) 1081 return PTR_ERR(bio); 1082 1083 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 1084 osd_req_write_sg(or, obj, bio, sglist, numentries); 1085 1086 return 0; 1087} 1088EXPORT_SYMBOL(osd_req_write_sg_kern); 1089 1090int osd_req_read_sg_kern(struct osd_request *or, 1091 const struct osd_obj_id *obj, void **buff, 1092 const struct osd_sg_entry *sglist, unsigned numentries) 1093{ 1094 struct bio *bio = _create_sg_bios(or, buff, sglist, numentries); 1095 if (IS_ERR(bio)) 1096 return PTR_ERR(bio); 1097 1098 osd_req_read_sg(or, obj, bio, sglist, numentries); 1099 1100 return 0; 1101} 1102EXPORT_SYMBOL(osd_req_read_sg_kern); 1103 1104 1105 1106void osd_req_get_attributes(struct osd_request *or, 1107 const struct osd_obj_id *obj) 1108{ 1109 _osd_req_encode_common(or, 
		OSD_ACT_GET_ATTRIBUTES, obj, 0, 0);
}
EXPORT_SYMBOL(osd_req_get_attributes);

void osd_req_set_attributes(struct osd_request *or,
	const struct osd_obj_id *obj)
{
	_osd_req_encode_common(or, OSD_ACT_SET_ATTRIBUTES, obj, 0, 0);
}
EXPORT_SYMBOL(osd_req_set_attributes);

/*
 * Attributes List-mode
 */

/* Append @nelem attributes from @oa to the request's set-attr list,
 * growing the encode buffer on demand. List-mode and page-mode are
 * mutually exclusive per request, hence the attributes_mode check.
 */
int osd_req_add_set_attr_list(struct osd_request *or,
	const struct osd_attr *oa, unsigned nelem)
{
	unsigned total_bytes = or->set_attr.total_bytes;
	void *attr_last;
	int ret;

	if (or->attributes_mode &&
	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
		/* caller mixed list-mode with page-mode on one request */
		WARN_ON(1);
		return -EINVAL;
	}
	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;

	if (!total_bytes) { /* first-time: allocate and put list header */
		total_bytes = _osd_req_sizeof_alist_header(or);
		ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
		if (ret)
			return ret;
		_osd_req_set_alist_type(or, or->set_attr.buff,
					OSD_ATTR_LIST_SET_RETRIEVE);
	}
	attr_last = or->set_attr.buff + total_bytes;

	for (; nelem; --nelem) {
		unsigned elem_size = _osd_req_alist_elem_size(or, oa->len);

		total_bytes += elem_size;
		if (unlikely(or->set_attr.alloc_size < total_bytes)) {
			/* Buffer too small: record what was encoded so far,
			 * reallocate (which copies), then recompute the
			 * write cursor into the new buffer.
			 */
			or->set_attr.total_bytes = total_bytes - elem_size;
			ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
			if (ret)
				return ret;
			attr_last =
				or->set_attr.buff + or->set_attr.total_bytes;
		}

		_osd_req_alist_elem_encode(or, attr_last, oa);

		attr_last += elem_size;
		++oa;
	}

	or->set_attr.total_bytes = total_bytes;
	return 0;
}
EXPORT_SYMBOL(osd_req_add_set_attr_list);

/* Map @seg (plus up to @padding alignment bytes) into @io's request.
 * Padding reuses the tail of @last_seg's allocation when it fits, so the
 * pad bytes are whatever that buffer holds; otherwise io->pad_buff is used.
 */
static int _req_append_segment(struct osd_request *or,
	unsigned padding, struct _osd_req_data_segment *seg,
	struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
{
	void *pad_buff;
	int ret;

	if (padding) {
		/* check if we can just add it to last buffer */
		if (last_seg &&
		    (padding <= last_seg->alloc_size - last_seg->total_bytes))
			pad_buff = last_seg->buff + last_seg->total_bytes;
		else
			pad_buff = io->pad_buff;

		ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
				      or->alloc_flags);
		if (ret)
			return ret;
		io->total_bytes += padding;
	}

	ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
			      or->alloc_flags);
	if (ret)
		return ret;

	io->total_bytes += seg->total_bytes;
	OSD_DEBUG("padding=%d buff=%p total_bytes=%d\n", padding, seg->buff,
		  seg->total_bytes);
	return 0;
}

/* Encode the set-attr list's offset/length into the CDB header and map the
 * encoded list into the data-out direction. No-op when nothing was set.
 */
static int _osd_req_finalize_set_attr_list(struct osd_request *or)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
	unsigned padding;
	int ret;

	if (!or->set_attr.total_bytes) {
		cdbh->attrs_list.set_attr_offset = OSD_OFFSET_UNUSED;
		return 0;
	}

	cdbh->attrs_list.set_attr_bytes = cpu_to_be32(or->set_attr.total_bytes);
	cdbh->attrs_list.set_attr_offset =
		osd_req_encode_offset(or, or->out.total_bytes, &padding);

	ret = _req_append_segment(or, padding, &or->set_attr,
				  or->out.last_seg, &or->out);
	if (ret)
		return ret;

	or->out.last_seg = &or->set_attr;
	return 0;
}

/* Append @nelem attribute IDs to the get-attr request (data-out descriptor
 * list) and grow the expected data-in reply size accordingly.
 */
int osd_req_add_get_attr_list(struct osd_request *or,
	const struct osd_attr *oa, unsigned nelem)
{
	unsigned total_bytes = or->enc_get_attr.total_bytes;
	void *attr_last;
	int ret;

	if (or->attributes_mode &&
	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
		/* caller mixed list-mode with page-mode on one request */
		WARN_ON(1);
		return -EINVAL;
	}
	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;

	/* first time calc data-in list header size */
	if (!or->get_attr.total_bytes)
		or->get_attr.total_bytes = _osd_req_sizeof_alist_header(or);

	/* calc data-out
	   info */
	if (!total_bytes) { /* first-time: allocate and put list header */
		unsigned max_bytes;

		total_bytes = _osd_req_sizeof_alist_header(or);
		max_bytes = total_bytes +
			nelem * sizeof(struct osd_attributes_list_attrid);
		ret = _alloc_get_attr_desc(or, max_bytes);
		if (ret)
			return ret;

		_osd_req_set_alist_type(or, or->enc_get_attr.buff,
					OSD_ATTR_LIST_GET);
	}
	attr_last = or->enc_get_attr.buff + total_bytes;

	for (; nelem; --nelem) {
		struct osd_attributes_list_attrid *attrid;
		const unsigned cur_size = sizeof(*attrid);

		total_bytes += cur_size;
		if (unlikely(or->enc_get_attr.alloc_size < total_bytes)) {
			/* grow the descriptor buffer; the realloc copies, so
			 * recompute the write cursor afterwards
			 */
			or->enc_get_attr.total_bytes = total_bytes - cur_size;
			ret = _alloc_get_attr_desc(or,
					total_bytes + nelem * sizeof(*attrid));
			if (ret)
				return ret;
			attr_last = or->enc_get_attr.buff +
				or->enc_get_attr.total_bytes;
		}

		attrid = attr_last;
		attrid->attr_page = cpu_to_be32(oa->attr_page);
		attrid->attr_id = cpu_to_be32(oa->attr_id);

		attr_last += cur_size;

		/* calc data-in size */
		or->get_attr.total_bytes +=
			_osd_req_alist_elem_size(or, oa->len);
		++oa;
	}

	or->enc_get_attr.total_bytes = total_bytes;

	OSD_DEBUG(
	       "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%zu)\n",
	       or->get_attr.total_bytes,
	       or->get_attr.total_bytes - _osd_req_sizeof_alist_header(or),
	       or->enc_get_attr.total_bytes,
	       (or->enc_get_attr.total_bytes - _osd_req_sizeof_alist_header(or))
			/ sizeof(struct osd_attributes_list_attrid));

	return 0;
}
EXPORT_SYMBOL(osd_req_add_get_attr_list);

/* Wire the get-attr exchange into the CDB: the encoded ID list goes out on
 * the data-out buffer, the reply buffer is reserved on the data-in side.
 */
static int _osd_req_finalize_get_attr_list(struct osd_request *or)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
	unsigned out_padding;
	unsigned in_padding;
	int ret;

	if (!or->enc_get_attr.total_bytes) {
		cdbh->attrs_list.get_attr_desc_offset = OSD_OFFSET_UNUSED;
		cdbh->attrs_list.get_attr_offset = OSD_OFFSET_UNUSED;
		return 0;
	}

	ret = _alloc_get_attr_list(or);
	if (ret)
		return ret;

	/* The out-going buffer info update */
	OSD_DEBUG("out-going\n");
	cdbh->attrs_list.get_attr_desc_bytes =
		cpu_to_be32(or->enc_get_attr.total_bytes);

	cdbh->attrs_list.get_attr_desc_offset =
		osd_req_encode_offset(or, or->out.total_bytes, &out_padding);

	ret = _req_append_segment(or, out_padding, &or->enc_get_attr,
				  or->out.last_seg, &or->out);
	if (ret)
		return ret;
	or->out.last_seg = &or->enc_get_attr;

	/* The incoming buffer info update */
	OSD_DEBUG("in-coming\n");
	cdbh->attrs_list.get_attr_alloc_length =
		cpu_to_be32(or->get_attr.total_bytes);

	cdbh->attrs_list.get_attr_offset =
		osd_req_encode_offset(or, or->in.total_bytes, &in_padding);

	ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
				  &or->in);
	if (ret)
		return ret;
	or->in.last_seg = &or->get_attr;

	return 0;
}

/* Iterate over the attribute list returned by the target. Decodes up to
 * *@nelem attributes into @oa starting at *@iterator (NULL for the first
 * call, which also validates the list header). On return *@nelem holds the
 * number decoded, *@iterator the resume point (NULL when exhausted), and
 * the return value is the count of bytes still left to decode.
 */
int osd_req_decode_get_attr_list(struct osd_request *or,
	struct osd_attr *oa, int *nelem, void **iterator)
{
	unsigned cur_bytes, returned_bytes;
	int n;
	const unsigned sizeof_attr_list = _osd_req_sizeof_alist_header(or);
	void *cur_p;

	if (!_osd_req_is_alist_type(or, or->get_attr.buff,
				    OSD_ATTR_LIST_SET_RETRIEVE)) {
		/* Not a retrieve-list reply: report an empty result */
		oa->attr_page = 0;
		oa->attr_id = 0;
		oa->val_ptr = NULL;
		oa->len = 0;
		*iterator = NULL;
		return 0;
	}

	if (*iterator) {
		/* resuming: iterator must point inside our reply buffer */
		BUG_ON((*iterator < or->get_attr.buff) ||
		       (or->get_attr.buff + or->get_attr.alloc_size < *iterator));
		cur_p = *iterator;
		cur_bytes = (*iterator - or->get_attr.buff) - sizeof_attr_list;
		returned_bytes = or->get_attr.total_bytes;
	} else { /* first time decode the list header */
		cur_bytes = sizeof_attr_list;
		returned_bytes = _osd_req_alist_size(or, or->get_attr.buff) +
					sizeof_attr_list;

		cur_p = or->get_attr.buff + sizeof_attr_list;

		if (returned_bytes > or->get_attr.alloc_size) {
			/* target claims more than we allocated: clamp the
			 * decode to what actually fit in our buffer
			 */
			OSD_DEBUG("target report: space was not big enough! "
				  "Allocate=%u Needed=%u\n",
				  or->get_attr.alloc_size,
				  returned_bytes + sizeof_attr_list);

			returned_bytes =
				or->get_attr.alloc_size - sizeof_attr_list;
		}
		or->get_attr.total_bytes = returned_bytes;
	}

	for (n = 0; (n < *nelem) && (cur_bytes < returned_bytes); ++n) {
		int inc = _osd_req_alist_elem_decode(or, cur_p, oa,
						 returned_bytes - cur_bytes);

		if (inc < 0) {
			/* malformed element: poison the entry and force the
			 * iteration (and the caller's loop) to terminate
			 */
			OSD_ERR("BAD FOOD from target. list not valid!"
				"c=%d r=%d n=%d\n",
				cur_bytes, returned_bytes, n);
			oa->val_ptr = NULL;
			cur_bytes = returned_bytes; /* break the caller loop */
			break;
		}

		cur_bytes += inc;
		cur_p += inc;
		++oa;
	}

	*iterator = (returned_bytes - cur_bytes) ?
		cur_p : NULL;
	*nelem = n;
	return returned_bytes - cur_bytes;
}
EXPORT_SYMBOL(osd_req_decode_get_attr_list);

/*
 * Attributes Page-mode
 */

/* Request a whole attribute page (@page_id) into @attar_page, optionally
 * setting a single attribute (@set_one_attr) in the same command.
 * Page-mode excludes list-mode on the same request.
 */
int osd_req_add_get_attr_page(struct osd_request *or,
	u32 page_id, void *attar_page, unsigned max_page_len,
	const struct osd_attr *set_one_attr)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);

	if (or->attributes_mode &&
	    or->attributes_mode != OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
		/* caller mixed page-mode with list-mode on one request */
		WARN_ON(1);
		return -EINVAL;
	}
	or->attributes_mode = OSD_CDB_GET_ATTR_PAGE_SET_ONE;

	or->get_attr.buff = attar_page;
	or->get_attr.total_bytes = max_page_len;

	cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
	cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);

	if (!set_one_attr || !set_one_attr->attr_page)
		return 0; /* The set is optional */

	or->set_attr.buff = set_one_attr->val_ptr;
	or->set_attr.total_bytes = set_one_attr->len;

	cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
	cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
	cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
	return 0;
}
EXPORT_SYMBOL(osd_req_add_get_attr_page);

/* Page-mode finalization: reserve the data-in space for the returned page
 * and, when present, map the single set-attribute value on the data-out side.
 */
static int _osd_req_finalize_attr_page(struct osd_request *or)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
	unsigned in_padding, out_padding;
	int ret;

	/* returned page */
	cdbh->attrs_page.get_attr_offset =
		osd_req_encode_offset(or, or->in.total_bytes, &in_padding);

	ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
				  &or->in);
	if (ret)
		return ret;

	if (or->set_attr.total_bytes == 0)
		return 0;

	/* set one value */
	cdbh->attrs_page.set_attr_offset =
		osd_req_encode_offset(or, or->out.total_bytes, &out_padding);

	ret =
	    _req_append_segment(or, out_padding, &or->set_attr, NULL,
				&or->out);
	return ret;
}

/* Write the data-out integrity-check offset into the version-appropriate
 * half of the security-parameters union.
 */
static inline void osd_sec_parms_set_out_offset(bool is_v1,
	struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
{
	if (is_v1)
		sec_parms->v1.data_out_integrity_check_offset = offset;
	else
		sec_parms->v2.data_out_integrity_check_offset = offset;
}

/* Same as above for the data-in integrity-check offset. */
static inline void osd_sec_parms_set_in_offset(bool is_v1,
	struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
{
	if (is_v1)
		sec_parms->v1.data_in_integrity_check_offset = offset;
	else
		sec_parms->v2.data_in_integrity_check_offset = offset;
}

/* Append the data-integrity descriptors (out and/or in) to the request and
 * sign the out-going data. Only active for ALLDATA security methods.
 * @out_data_bio/@out_data_bytes are the data-out chain as it was BEFORE the
 * CDB-continuation was linked in (the continuation is signed separately).
 */
static int _osd_req_finalize_data_integrity(struct osd_request *or,
	bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
	const u8 *cap_key)
{
	struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
	int ret;

	if (!osd_is_sec_alldata(sec_parms))
		return 0;

	if (has_out) {
		struct _osd_req_data_segment seg = {
			.buff = &or->out_data_integ,
			.total_bytes = sizeof(or->out_data_integ),
		};
		unsigned pad;

		or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
		or->out_data_integ.set_attributes_bytes = cpu_to_be64(
			or->set_attr.total_bytes);
		or->out_data_integ.get_attributes_bytes = cpu_to_be64(
			or->enc_get_attr.total_bytes);

		osd_sec_parms_set_out_offset(osd_req_is_ver1(or), sec_parms,
			osd_req_encode_offset(or, or->out.total_bytes, &pad));

		ret = _req_append_segment(or, pad, &seg, or->out.last_seg,
					  &or->out);
		if (ret)
			return ret;
		or->out.last_seg = NULL;

		/* they are now all chained to request sign them all together */
		osd_sec_sign_data(&or->out_data_integ, out_data_bio,
				  cap_key);
	}

	if (has_in) {
		struct _osd_req_data_segment seg = {
			.buff = &or->in_data_integ,
.total_bytes = sizeof(or->in_data_integ), 1541 }; 1542 unsigned pad; 1543 1544 osd_sec_parms_set_in_offset(osd_req_is_ver1(or), sec_parms, 1545 osd_req_encode_offset(or, or->in.total_bytes, &pad)); 1546 1547 ret = _req_append_segment(or, pad, &seg, or->in.last_seg, 1548 &or->in); 1549 if (ret) 1550 return ret; 1551 1552 or->in.last_seg = NULL; 1553 } 1554 1555 return 0; 1556} 1557 1558/* 1559 * osd_finalize_request and helpers 1560 */ 1561static struct request *_make_request(struct request_queue *q, bool has_write, 1562 struct _osd_io_info *oii, gfp_t flags) 1563{ 1564 struct request *req; 1565 struct bio *bio = oii->bio; 1566 int ret; 1567 1568 req = blk_get_request(q, has_write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 1569 flags); 1570 if (IS_ERR(req)) 1571 return req; 1572 scsi_req_init(req); 1573 1574 for_each_bio(bio) { 1575 struct bio *bounce_bio = bio; 1576 1577 blk_queue_bounce(req->q, &bounce_bio); 1578 ret = blk_rq_append_bio(req, bounce_bio); 1579 if (ret) 1580 return ERR_PTR(ret); 1581 } 1582 1583 return req; 1584} 1585 1586static int _init_blk_request(struct osd_request *or, 1587 bool has_in, bool has_out) 1588{ 1589 gfp_t flags = or->alloc_flags; 1590 struct scsi_device *scsi_device = or->osd_dev->scsi_device; 1591 struct request_queue *q = scsi_device->request_queue; 1592 struct request *req; 1593 int ret; 1594 1595 req = _make_request(q, has_out, has_out ? 
			    &or->out : &or->in, flags);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	or->request = req;
	req->rq_flags |= RQF_QUIET;

	req->timeout = or->timeout;
	req->retries = or->retries;

	if (has_out) {
		or->out.req = req;
		if (has_in) {
			/* allocate bidi request */
			req = _make_request(q, false, &or->in, flags);
			if (IS_ERR(req)) {
				OSD_DEBUG("blk_get_request for bidi failed\n");
				ret = PTR_ERR(req);
				goto out;
			}
			/* NOTE(review): _make_request already ran
			 * scsi_req_init() on this request; this second call
			 * looks redundant — confirm before removing.
			 */
			scsi_req_init(req);
			or->in.req = or->request->next_rq = req;
		}
	} else if (has_in)
		or->in.req = req;

	ret = 0;
out:
	OSD_DEBUG("or=%p has_in=%d has_out=%d => %d, %p\n",
			or, has_in, has_out, ret, or->request);
	return ret;
}

/* Freeze the request for execution: apply @options and @cap to the CDB,
 * finalize the CDB continuation, attribute segments and data-integrity
 * descriptors, then sign the CDB with @cap_key.
 * Returns 0 or a negative errno.
 */
int osd_finalize_request(struct osd_request *or,
	u8 options, const void *cap, const u8 *cap_key)
{
	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
	bool has_in, has_out;
	/* Save for data_integrity without the cdb_continuation */
	struct bio *out_data_bio = or->out.bio;
	u64 out_data_bytes = or->out.total_bytes;
	int ret;

	if (options & OSD_REQ_FUA)
		cdbh->options |= OSD_CDB_FUA;

	if (options & OSD_REQ_DPO)
		cdbh->options |= OSD_CDB_DPO;

	if (options & OSD_REQ_BYPASS_TIMESTAMPS)
		cdbh->timestamp_control = OSD_CDB_BYPASS_TIMESTAMPS;

	osd_set_caps(&or->cdb, cap);

	has_in = or->in.bio || or->get_attr.total_bytes;
	has_out = or->out.bio || or->cdb_cont.total_bytes ||
		or->set_attr.total_bytes || or->enc_get_attr.total_bytes;

	/* the continuation must be finalized first: it is linked at the head
	 * of the data-out chain
	 */
	ret = _osd_req_finalize_cdb_cont(or, cap_key);
	if (ret) {
		OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
		return ret;
	}
	ret = _init_blk_request(or, has_in, has_out);
	if (ret) {
		OSD_DEBUG("_init_blk_request failed\n");
		return ret;
	}

	or->out.pad_buff = sg_out_pad_buffer;
	or->in.pad_buff =
			  sg_in_pad_buffer;

	/* list-mode is the default when the caller never chose a mode */
	if (!or->attributes_mode)
		or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
	cdbh->command_specific_options |= or->attributes_mode;
	if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
		ret = _osd_req_finalize_attr_page(or);
		if (ret) {
			OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
			return ret;
		}
	} else {
		/* TODO: I think that for the GET_ATTR command these 2 should
		 * be reversed to keep them in execution order (for embedded
		 * targets with low memory footprint)
		 */
		ret = _osd_req_finalize_set_attr_list(or);
		if (ret) {
			OSD_DEBUG("_osd_req_finalize_set_attr_list failed\n");
			return ret;
		}

		ret = _osd_req_finalize_get_attr_list(or);
		if (ret) {
			OSD_DEBUG("_osd_req_finalize_get_attr_list failed\n");
			return ret;
		}
	}

	ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
					       out_data_bio, out_data_bytes,
					       cap_key);
	if (ret)
		return ret;

	osd_sec_sign_cdb(&or->cdb, cap_key);

	scsi_req(or->request)->cmd = or->cdb.buff;
	scsi_req(or->request)->cmd_len = _osd_req_cdb_len(or);

	return 0;
}
EXPORT_SYMBOL(osd_finalize_request);

/* True when @code is one of the OSD additional-sense codes that indicate a
 * credential/security failure (mapped to -EINVAL by the sense decoder).
 */
static bool _is_osd_security_code(int code)
{
	return (code == osd_security_audit_value_frozen) ||
	       (code == osd_security_working_key_frozen) ||
	       (code == osd_nonce_not_unique) ||
	       (code == osd_nonce_timestamp_out_of_range) ||
	       (code == osd_invalid_dataout_buffer_integrity_check_value);
}

/* Conditional sense-dump printing; __cur_sense_need_output is a local in
 * the function that expands these macros.
 */
#define OSD_SENSE_PRINT1(fmt, a...) \
	do { \
		if (__cur_sense_need_output) \
			OSD_ERR(fmt, ##a); \
	} while (0)

#define OSD_SENSE_PRINT2(fmt, a...) \
	OSD_SENSE_PRINT1(" " fmt, ##a)

/* Decode the descriptor-format SCSI sense data attached to @or into @osi
 * (or a local if @osi is NULL), optionally collecting offending attributes
 * into @bad_attr_list. Classifies the failure into osi->osd_err_pri and
 * returns a matching negative errno (0 when the request actually succeeded).
 * @silent suppresses the debug dump when sense printing is compiled in.
 */
int osd_req_decode_sense_full(struct osd_request *or,
	struct osd_sense_info *osi, bool silent,
	struct osd_obj_id *bad_obj_list __unused, int max_obj __unused,
	struct osd_attr *bad_attr_list, int max_attr)
{
	int sense_len, original_sense_len;
	struct osd_sense_info local_osi;
	struct scsi_sense_descriptor_based *ssdb;
	void *cur_descriptor;
#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 0)
	const bool __cur_sense_need_output = false;
#else
	bool __cur_sense_need_output = !silent;
#endif
	int ret;

	if (likely(!or->req_errors))
		return 0;

	osi = osi ? : &local_osi;
	memset(osi, 0, sizeof(*osi));

	ssdb = (typeof(ssdb))or->sense;
	sense_len = or->sense_len;
	if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
		OSD_ERR("Block-layer returned error(0x%x) but "
			"sense_len(%u) || key(%d) is empty\n",
			or->req_errors, sense_len, ssdb->sense_key);
		goto analyze;
	}

	/* 0x72/0x73 are the descriptor-format response codes */
	if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) {
		OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n",
			ssdb->response_code, sense_len);
		goto analyze;
	}

	osi->key = ssdb->sense_key;
	osi->additional_code = be16_to_cpu(ssdb->additional_sense_code);
	original_sense_len = ssdb->additional_sense_length + 8;

#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 1)
	if (__cur_sense_need_output)
		__cur_sense_need_output = (osi->key > scsi_sk_recovered_error);
#endif
	OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) "
			"additional_code=0x%x async_error=%d errors=0x%x\n",
			osi->key, original_sense_len, sense_len,
			osi->additional_code, or->async_error,
			or->req_errors);

	if (original_sense_len < sense_len)
		sense_len = original_sense_len;

	/* walk the variable-length sense descriptors */
	cur_descriptor = ssdb->ssd;
	sense_len -= sizeof(*ssdb);
	while (sense_len > 0) {
		struct scsi_sense_descriptor *ssd = cur_descriptor;
		int cur_len = ssd->additional_length + 2;

		sense_len -= cur_len;

		if (sense_len < 0)
			break; /* sense was truncated */

		switch (ssd->descriptor_type) {
		case scsi_sense_information:
		case scsi_sense_command_specific_information:
		{
			struct scsi_sense_command_specific_data_descriptor
				*sscd = cur_descriptor;

			osi->command_info =
				get_unaligned_be64(&sscd->information) ;
			OSD_SENSE_PRINT2(
				"command_specific_information 0x%llx \n",
				_LLU(osi->command_info));
			break;
		}
		case scsi_sense_key_specific:
		{
			struct scsi_sense_key_specific_data_descriptor
				*ssks = cur_descriptor;

			osi->sense_info = get_unaligned_be16(&ssks->value);
			OSD_SENSE_PRINT2(
				"sense_key_specific_information %u"
				"sksv_cd_bpv_bp (0x%x)\n",
				osi->sense_info, ssks->sksv_cd_bpv_bp);
			break;
		}
		case osd_sense_object_identification:
		{ /*FIXME: Keep first not last, Store in array*/
			struct osd_sense_identification_data_descriptor
				*osidd = cur_descriptor;

			osi->not_initiated_command_functions =
				le32_to_cpu(osidd->not_initiated_functions);
			osi->completed_command_functions =
				le32_to_cpu(osidd->completed_functions);
			osi->obj.partition = be64_to_cpu(osidd->partition_id);
			osi->obj.id = be64_to_cpu(osidd->object_id);
			OSD_SENSE_PRINT2(
				"object_identification pid=0x%llx oid=0x%llx\n",
				_LLU(osi->obj.partition), _LLU(osi->obj.id));
			OSD_SENSE_PRINT2(
				"not_initiated_bits(%x) "
				"completed_command_bits(%x)\n",
				osi->not_initiated_command_functions,
				osi->completed_command_functions);
			break;
		}
		case osd_sense_response_integrity_check:
		{
			struct osd_sense_response_integrity_check_descriptor
				*osricd = cur_descriptor;
			const unsigned len =
					  sizeof(osricd->integrity_check_value);
			char key_dump[len*4 + 2]; /* 2nibbles+space+ASCII */

			hex_dump_to_buffer(osricd->integrity_check_value, len,
				       32, 1, key_dump, sizeof(key_dump), true);
			OSD_SENSE_PRINT2("response_integrity [%s]\n", key_dump);
		}
		/* NOTE(review): no break above — control falls into the
		 * attribute-identification case and re-parses this
		 * integrity-check descriptor as attribute data. Looks
		 * unintentional; confirm against OSD-2 before adding break.
		 */
		case osd_sense_attribute_identification:
		{
			struct osd_sense_attributes_data_descriptor
				*osadd = cur_descriptor;
			unsigned len = min(cur_len, sense_len);
			struct osd_sense_attr *pattr = osadd->sense_attrs;

			while (len >= sizeof(*pattr)) {
				u32 attr_page = be32_to_cpu(pattr->attr_page);
				u32 attr_id = be32_to_cpu(pattr->attr_id);

				/* keep only the first offending attribute in
				 * osi, but fill the caller's array with all
				 */
				if (!osi->attr.attr_page) {
					osi->attr.attr_page = attr_page;
					osi->attr.attr_id = attr_id;
				}

				if (bad_attr_list && max_attr) {
					bad_attr_list->attr_page = attr_page;
					bad_attr_list->attr_id = attr_id;
					bad_attr_list++;
					max_attr--;
				}

				len -= sizeof(*pattr);
				OSD_SENSE_PRINT2(
					"osd_sense_attribute_identification"
					"attr_page=0x%x attr_id=0x%x\n",
					attr_page, attr_id);
			}
		}
		/* NOTE(review): also no break here — falls through and
		 * prints the field_replaceable_unit message below; likely
		 * unintentional, verify.
		 */
		/*These are not legal for OSD*/
		case scsi_sense_field_replaceable_unit:
			OSD_SENSE_PRINT2("scsi_sense_field_replaceable_unit\n");
			break;
		case scsi_sense_stream_commands:
			OSD_SENSE_PRINT2("scsi_sense_stream_commands\n");
			break;
		case scsi_sense_block_commands:
			OSD_SENSE_PRINT2("scsi_sense_block_commands\n");
			break;
		case scsi_sense_ata_return:
			OSD_SENSE_PRINT2("scsi_sense_ata_return\n");
			break;
		default:
			if (ssd->descriptor_type <= scsi_sense_Reserved_last)
				OSD_SENSE_PRINT2(
					"scsi_sense Reserved descriptor (0x%x)",
					ssd->descriptor_type);
			else
				OSD_SENSE_PRINT2(
					"scsi_sense Vendor descriptor (0x%x)",
					ssd->descriptor_type);
		}

		cur_descriptor += cur_len;
	}

analyze:
	/* map the decoded sense (or its absence) to an error priority */
	if (!osi->key) {
		/* scsi sense is Empty, the request was never issued to target
		 * linux return code might tell us what happened.
		 */
		if (or->async_error == -ENOMEM)
			osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
		else
			osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
		ret = or->async_error;
	} else if (osi->key <= scsi_sk_recovered_error) {
		osi->osd_err_pri = 0;
		ret = 0;
	} else if (osi->additional_code == scsi_invalid_field_in_cdb) {
		if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) {
			osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES;
			ret = -EFAULT; /* caller should recover from this */
		} else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) {
			osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND;
			ret = -ENOENT;
		} else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) {
			osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS;
			ret = -EACCES;
		} else {
			osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
			ret = -EINVAL;
		}
	} else if (osi->additional_code == osd_quota_error) {
		osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE;
		ret = -ENOSPC;
	} else if (_is_osd_security_code(osi->additional_code)) {
		osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
		ret = -EINVAL;
	} else {
		osi->osd_err_pri = OSD_ERR_PRI_EIO;
		ret = -EIO;
	}

	/* on error assume nothing transferred unless residuals were set */
	if (!or->out.residual)
		or->out.residual = or->out.total_bytes;
	if (!or->in.residual)
		or->in.residual = or->in.total_bytes;

	return ret;
}
EXPORT_SYMBOL(osd_req_decode_sense_full);

/*
 * Implementation of osd_sec.h API
 * TODO: Move to a separate osd_sec.c file at a later stage.
 */

enum { OSD_SEC_CAP_V1_ALL_CAPS =
	OSD_SEC_CAP_APPEND | OSD_SEC_CAP_OBJ_MGMT | OSD_SEC_CAP_REMOVE |
	OSD_SEC_CAP_CREATE | OSD_SEC_CAP_SET_ATTR | OSD_SEC_CAP_GET_ATTR |
	OSD_SEC_CAP_WRITE | OSD_SEC_CAP_READ | OSD_SEC_CAP_POL_SEC |
	OSD_SEC_CAP_GLOBAL | OSD_SEC_CAP_DEV_MGMT
};

enum { OSD_SEC_CAP_V2_ALL_CAPS =
	OSD_SEC_CAP_V1_ALL_CAPS | OSD_SEC_CAP_QUERY | OSD_SEC_CAP_M_OBJECT
};

/* Fill @caps with an all-permissions, no-security capability for @obj.
 * The object-type / descriptor-type fields are derived from which id
 * fields of @obj are set (root / partition / object / collection).
 * NOTE: only the V1 cap mask is granted here, matching the FIXME below
 * about the target supporting OSDv1 caps only.
 */
void osd_sec_init_nosec_doall_caps(void *caps,
	const struct osd_obj_id *obj, bool is_collection, const bool is_v1)
{
	struct osd_capability *cap = caps;
	u8 type;
	u8 descriptor_type;

	if (likely(obj->id)) {
		if (unlikely(is_collection)) {
			type = OSD_SEC_OBJ_COLLECTION;
			descriptor_type = is_v1 ? OSD_SEC_OBJ_DESC_OBJ :
						  OSD_SEC_OBJ_DESC_COL;
		} else {
			type = OSD_SEC_OBJ_USER;
			descriptor_type = OSD_SEC_OBJ_DESC_OBJ;
		}
		/* an object id without a partition id is malformed */
		WARN_ON(!obj->partition);
	} else {
		type = obj->partition ? OSD_SEC_OBJ_PARTITION :
					OSD_SEC_OBJ_ROOT;
		descriptor_type = OSD_SEC_OBJ_DESC_PAR;
	}

	memset(cap, 0, sizeof(*cap));

	cap->h.format = OSD_SEC_CAP_FORMAT_VER1;
	cap->h.integrity_algorithm__key_version = 0; /* MAKE_BYTE(0, 0); */
	cap->h.security_method = OSD_SEC_NOSEC;
/*	cap->expiration_time;
	cap->AUDIT[30-10];
	cap->discriminator[42-30];
	cap->object_created_time; */
	cap->h.object_type = type;
	osd_sec_set_caps(&cap->h, OSD_SEC_CAP_V1_ALL_CAPS);
	cap->h.object_descriptor_type = descriptor_type;
	cap->od.obj_desc.policy_access_tag = 0;
	cap->od.obj_desc.allowed_partition_id = cpu_to_be64(obj->partition);
	cap->od.obj_desc.allowed_object_id = cpu_to_be64(obj->id);
}
EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps);

/* FIXME: Extract version from caps pointer.
 * Also Pete's target only supports caps from OSDv1 for now
 */
void osd_set_caps(struct osd_cdb *cdb, const void *caps)
{
	/* NOTE: They start at same address */
	memcpy(&cdb->v1.caps, caps, OSDv1_CAP_LEN);
}

/* NOSEC stub: no security method in use, so nothing is ALLDATA */
bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused)
{
	return false;
}

/* NOSEC stub: CDB signing is a no-op */
void osd_sec_sign_cdb(struct osd_cdb *ocdb __unused, const u8 *cap_key __unused)
{
}

/* NOSEC stub: data signing is a no-op */
void osd_sec_sign_data(void *data_integ __unused,
		       struct bio *bio __unused, const u8 *cap_key __unused)
{
}

/*
 * Declared in osd_protocol.h
 * 4.12.5 Data-In and Data-Out buffer offsets
 * byte offset = mantissa * (2^(exponent+8))
 * Returns the smallest allowed encoded offset that contains given @offset
 * The actual encoded offset returned is @offset + *@padding.
 */
osd_cdb_offset __osd_encode_offset(
	u64 offset, unsigned *padding, int min_shift, int max_shift)
{
	u64 try_offset = -1, mod, align;
	osd_cdb_offset be32_offset;
	int shift;

	*padding = 0;
	if (!offset)
		return 0;

	/* pick the smallest exponent whose mantissa fits in 28 bits */
	for (shift = min_shift; shift < max_shift; ++shift) {
		try_offset = offset >> shift;
		if (try_offset < (1 << OSD_OFFSET_MAX_BITS))
			break;
	}

	/* offset too large to be represented at all */
	BUG_ON(shift == max_shift);

	align = 1 << shift;
	mod = offset & (align - 1);
	if (mod) {
		/* round the mantissa up; *padding makes offset+padding
		 * land exactly on the encoded boundary.
		 * NOTE(review): if the mantissa was already at its 28-bit
		 * maximum this increment overflows into the exponent
		 * nibble below — presumably unreachable, confirm.
		 */
		*padding = align - mod;
		try_offset += 1;
	}

	/* exponent (biased by 8) goes in the top nibble */
	try_offset |= ((shift - 8) & 0xf) << 28;
	be32_offset = cpu_to_be32((u32)try_offset);

	OSD_DEBUG("offset=%llu mantissa=%llu exp=%d encoded=%x pad=%d\n",
		  _LLU(offset), _LLU(try_offset & 0x0FFFFFFF), shift,
		  be32_offset, *padding);
	return be32_offset;
}