Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '5.12/scsi-fixes' into 5.13/scsi-staging

Resolve a couple of conflicts between the 5.12 fixes branch and the
5.13 staging tree (iSCSI target and UFS).

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

+66 -57
+45 -33
drivers/scsi/hpsa_cmd.h
··· 20 20 #ifndef HPSA_CMD_H 21 21 #define HPSA_CMD_H 22 22 23 + #include <linux/compiler.h> 24 + 25 + #include <linux/build_bug.h> /* static_assert */ 26 + #include <linux/stddef.h> /* offsetof */ 27 + 23 28 /* general boundary defintions */ 24 29 #define SENSEINFOBYTES 32 /* may vary between hbas */ 25 30 #define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */ ··· 205 200 MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */ 206 201 207 202 /* SCSI-3 Commands */ 208 - #pragma pack(1) 209 - 210 203 #define HPSA_INQUIRY 0x12 211 204 struct InquiryData { 212 205 u8 data_byte[36]; 213 - }; 206 + } __packed; 214 207 215 208 #define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */ 216 209 #define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */ ··· 224 221 u8 xor_mult[2]; /**< XOR multipliers for this position, 225 222 * valid for data disks only */ 226 223 u8 reserved[2]; 227 - }; 224 + } __packed; 228 225 229 226 struct raid_map_data { 230 227 __le32 structure_size; /* Size of entire structure in bytes */ ··· 250 247 __le16 dekindex; /* Data encryption key index. 
*/ 251 248 u8 reserved[16]; 252 249 struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES]; 253 - }; 250 + } __packed; 254 251 255 252 struct ReportLUNdata { 256 253 u8 LUNListLength[4]; 257 254 u8 extended_response_flag; 258 255 u8 reserved[3]; 259 256 u8 LUN[HPSA_MAX_LUN][8]; 260 - }; 257 + } __packed; 261 258 262 259 struct ext_report_lun_entry { 263 260 u8 lunid[8]; ··· 272 269 u8 lun_count; /* multi-lun device, how many luns */ 273 270 u8 redundant_paths; 274 271 u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */ 275 - }; 272 + } __packed; 276 273 277 274 struct ReportExtendedLUNdata { 278 275 u8 LUNListLength[4]; 279 276 u8 extended_response_flag; 280 277 u8 reserved[3]; 281 278 struct ext_report_lun_entry LUN[HPSA_MAX_PHYS_LUN]; 282 - }; 279 + } __packed; 283 280 284 281 struct SenseSubsystem_info { 285 282 u8 reserved[36]; 286 283 u8 portname[8]; 287 284 u8 reserved1[1108]; 288 - }; 285 + } __packed; 289 286 290 287 /* BMIC commands */ 291 288 #define BMIC_READ 0x26 ··· 320 317 u8 Targ:6; 321 318 u8 Mode:2; /* b10 */ 322 319 } LogUnit; 323 - }; 320 + } __packed; 324 321 325 322 struct PhysDevAddr { 326 323 u32 TargetId:24; ··· 328 325 u32 Mode:2; 329 326 /* 2 level target device addr */ 330 327 union SCSI3Addr Target[2]; 331 - }; 328 + } __packed; 332 329 333 330 struct LogDevAddr { 334 331 u32 VolId:30; 335 332 u32 Mode:2; 336 333 u8 reserved[4]; 337 - }; 334 + } __packed; 338 335 339 336 union LUNAddr { 340 337 u8 LunAddrBytes[8]; 341 338 union SCSI3Addr SCSI3Lun[4]; 342 339 struct PhysDevAddr PhysDev; 343 340 struct LogDevAddr LogDev; 344 - }; 341 + } __packed; 345 342 346 343 struct CommandListHeader { 347 344 u8 ReplyQueue; ··· 349 346 __le16 SGTotal; 350 347 __le64 tag; 351 348 union LUNAddr LUN; 352 - }; 349 + } __packed; 353 350 354 351 struct RequestBlock { 355 352 u8 CDBLen; ··· 368 365 #define GET_DIR(tad) (((tad) >> 6) & 0x03) 369 366 u16 Timeout; 370 367 u8 CDB[16]; 371 - }; 368 + } __packed; 372 369 373 370 struct ErrDescriptor { 374 371 
__le64 Addr; 375 372 __le32 Len; 376 - }; 373 + } __packed; 377 374 378 375 struct SGDescriptor { 379 376 __le64 Addr; 380 377 __le32 Len; 381 378 __le32 Ext; 382 - }; 379 + } __packed; 383 380 384 381 union MoreErrInfo { 385 382 struct { ··· 393 390 u8 offense_num; /* byte # of offense 0-base */ 394 391 u32 offense_value; 395 392 } Invalid_Cmd; 396 - }; 393 + } __packed; 394 + 397 395 struct ErrorInfo { 398 396 u8 ScsiStatus; 399 397 u8 SenseLen; ··· 402 398 u32 ResidualCnt; 403 399 union MoreErrInfo MoreErrInfo; 404 400 u8 SenseInfo[SENSEINFOBYTES]; 405 - }; 401 + } __packed; 406 402 /* Command types */ 407 403 #define CMD_IOCTL_PEND 0x01 408 404 #define CMD_SCSI 0x03 ··· 457 453 atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */ 458 454 } __aligned(COMMANDLIST_ALIGNMENT); 459 455 456 + /* 457 + * Make sure our embedded atomic variable is aligned. Otherwise we break atomic 458 + * operations on architectures that don't support unaligned atomics like IA64. 459 + * 460 + * The assert guards against reintroduction of an unwanted __packed on 461 + * struct CommandList. 
462 + */ 463 + static_assert(offsetof(struct CommandList, refcount) % __alignof__(atomic_t) == 0); 464 + 460 465 /* Max S/G elements in I/O accelerator command */ 461 466 #define IOACCEL1_MAXSGENTRIES 24 462 467 #define IOACCEL2_MAXSGENTRIES 28 ··· 502 489 __le64 host_addr; /* 0x70 - 0x77 */ 503 490 u8 CISS_LUN[8]; /* 0x78 - 0x7F */ 504 491 struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; 505 - } __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); 492 + } __packed __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); 506 493 507 494 #define IOACCEL1_FUNCTION_SCSIIO 0x00 508 495 #define IOACCEL1_SGLOFFSET 32 ··· 532 519 u8 chain_indicator; 533 520 #define IOACCEL2_CHAIN 0x80 534 521 #define IOACCEL2_LAST_SG 0x40 535 - }; 522 + } __packed; 536 523 537 524 /* 538 525 * SCSI Response Format structure for IO Accelerator Mode 2 ··· 572 559 u8 sense_data_len; /* sense/response data length */ 573 560 u8 resid_cnt[4]; /* residual count */ 574 561 u8 sense_data_buff[32]; /* sense/response data buffer */ 575 - }; 562 + } __packed; 576 563 577 564 /* 578 565 * Structure for I/O accelerator (mode 2 or m2) commands. 
··· 605 592 __le32 tweak_upper; /* Encryption tweak, upper 4 bytes */ 606 593 struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; 607 594 struct io_accel2_scsi_response error_data; 608 - } __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); 595 + } __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); 609 596 610 597 /* 611 598 * defines for Mode 2 command struct ··· 631 618 __le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */ 632 619 __le64 error_ptr; /* Error Pointer */ 633 620 __le32 error_len; /* Error Length */ 634 - } __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); 621 + } __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); 635 622 636 623 /* Configuration Table Structure */ 637 624 struct HostWrite { ··· 639 626 __le32 command_pool_addr_hi; 640 627 __le32 CoalIntDelay; 641 628 __le32 CoalIntCount; 642 - }; 629 + } __packed; 643 630 644 631 #define SIMPLE_MODE 0x02 645 632 #define PERFORMANT_MODE 0x04 ··· 688 675 #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30) 689 676 #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31) 690 677 __le32 clear_event_notify; 691 - }; 678 + } __packed; 692 679 693 680 #define NUM_BLOCKFETCH_ENTRIES 8 694 681 struct TransTable_struct { ··· 699 686 __le32 RepQCtrAddrHigh32; 700 687 #define MAX_REPLY_QUEUES 64 701 688 struct vals32 RepQAddr[MAX_REPLY_QUEUES]; 702 - }; 689 + } __packed; 703 690 704 691 struct hpsa_pci_info { 705 692 unsigned char bus; 706 693 unsigned char dev_fn; 707 694 unsigned short domain; 708 695 u32 board_id; 709 - }; 696 + } __packed; 710 697 711 698 struct bmic_identify_controller { 712 699 u8 configured_logical_drive_count; /* offset 0 */ ··· 715 702 u8 pad2[136]; 716 703 u8 controller_mode; /* offset 292 */ 717 704 u8 pad3[32]; 718 - }; 705 + } __packed; 719 706 720 707 721 708 struct bmic_identify_physical_device { ··· 858 845 u8 max_link_rate[256]; 859 846 u8 neg_phys_link_rate[256]; 860 847 u8 box_conn_name[8]; 861 - } __attribute((aligned(512))); 848 + } __packed 
__attribute((aligned(512))); 862 849 863 850 struct bmic_sense_subsystem_info { 864 851 u8 primary_slot_number; ··· 871 858 u8 secondary_array_serial_number[32]; 872 859 u8 secondary_cache_serial_number[32]; 873 860 u8 pad[332]; 874 - }; 861 + } __packed; 875 862 876 863 struct bmic_sense_storage_box_params { 877 864 u8 reserved[36]; ··· 883 870 u8 reserver_3[84]; 884 871 u8 phys_connector[2]; 885 872 u8 reserved_4[296]; 886 - }; 873 + } __packed; 887 874 888 - #pragma pack() 889 875 #endif /* HPSA_CMD_H */
+4 -4
drivers/scsi/pm8001/pm8001_hwi.c
··· 223 223 PM8001_EVENT_LOG_SIZE; 224 224 pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01; 225 225 pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; 226 - for (i = 0; i < PM8001_MAX_INB_NUM; i++) { 226 + for (i = 0; i < pm8001_ha->max_q_num; i++) { 227 227 pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = 228 228 PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); 229 229 pm8001_ha->inbnd_q_tbl[i].upper_base_addr = ··· 249 249 pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; 250 250 pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; 251 251 } 252 - for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { 252 + for (i = 0; i < pm8001_ha->max_q_num; i++) { 253 253 pm8001_ha->outbnd_q_tbl[i].element_size_cnt = 254 254 PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); 255 255 pm8001_ha->outbnd_q_tbl[i].upper_base_addr = ··· 671 671 read_outbnd_queue_table(pm8001_ha); 672 672 /* update main config table ,inbound table and outbound table */ 673 673 update_main_config_table(pm8001_ha); 674 - for (i = 0; i < PM8001_MAX_INB_NUM; i++) 674 + for (i = 0; i < pm8001_ha->max_q_num; i++) 675 675 update_inbnd_queue_table(pm8001_ha, i); 676 - for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) 676 + for (i = 0; i < pm8001_ha->max_q_num; i++) 677 677 update_outbnd_queue_table(pm8001_ha, i); 678 678 /* 8081 controller donot require these operations */ 679 679 if (deviceid != 0x8081 && deviceid != 0x0042) {
+1 -1
drivers/scsi/scsi_transport_srp.c
··· 541 541 res = mutex_lock_interruptible(&rport->mutex); 542 542 if (res) 543 543 goto out; 544 - if (rport->state != SRP_RPORT_FAIL_FAST) 544 + if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST) 545 545 /* 546 546 * sdev state must be SDEV_TRANSPORT_OFFLINE, transition 547 547 * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
+14 -17
drivers/scsi/ufs/ufshcd.c
··· 6383 6383 DECLARE_COMPLETION_ONSTACK(wait); 6384 6384 struct request *req; 6385 6385 unsigned long flags; 6386 - int free_slot, task_tag, err; 6386 + int task_tag, err; 6387 6387 6388 6388 /* 6389 - * Get free slot, sleep if slots are unavailable. 6390 - * Even though we use wait_event() which sleeps indefinitely, 6391 - * the maximum wait time is bounded by %TM_CMD_TIMEOUT. 6389 + * blk_get_request() is used here only to get a free tag. 6392 6390 */ 6393 6391 req = blk_get_request(q, REQ_OP_DRV_OUT, 0); 6394 6392 if (IS_ERR(req)) 6395 6393 return PTR_ERR(req); 6396 6394 6397 6395 req->end_io_data = &wait; 6398 - free_slot = req->tag; 6399 - WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs); 6400 6396 ufshcd_hold(hba, false); 6401 6397 6402 6398 spin_lock_irqsave(host->host_lock, flags); 6403 - task_tag = hba->nutrs + free_slot; 6399 + blk_mq_start_request(req); 6404 6400 6401 + task_tag = req->tag; 6405 6402 treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag); 6406 6403 6407 - memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq)); 6408 - ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function); 6404 + memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); 6405 + ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); 6409 6406 6410 6407 /* send command to the controller */ 6411 - __set_bit(free_slot, &hba->outstanding_tasks); 6408 + __set_bit(task_tag, &hba->outstanding_tasks); 6412 6409 6413 6410 /* Make sure descriptors are ready before ringing the task doorbell */ 6414 6411 wmb(); 6415 6412 6416 - ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); 6413 + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); 6417 6414 /* Make sure that doorbell is committed immediately */ 6418 6415 wmb(); 6419 6416 ··· 6430 6433 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR); 6431 6434 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", 6432 6435 __func__, tm_function); 6433 - if (ufshcd_clear_tm_cmd(hba, 
free_slot)) 6434 - dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n", 6435 - __func__, free_slot); 6436 + if (ufshcd_clear_tm_cmd(hba, task_tag)) 6437 + dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", 6438 + __func__, task_tag); 6436 6439 err = -ETIMEDOUT; 6437 6440 } else { 6438 6441 err = 0; 6439 - memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq)); 6442 + memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq)); 6440 6443 6441 6444 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP); 6442 6445 } 6443 6446 6444 6447 spin_lock_irqsave(hba->host->host_lock, flags); 6445 - __clear_bit(free_slot, &hba->outstanding_tasks); 6448 + __clear_bit(task_tag, &hba->outstanding_tasks); 6446 6449 spin_unlock_irqrestore(hba->host->host_lock, flags); 6447 6450 6451 + ufshcd_release(hba); 6448 6452 blk_put_request(req); 6449 6453 6450 - ufshcd_release(hba); 6451 6454 return err; 6452 6455 } 6453 6456
+2 -2
drivers/target/iscsi/iscsi_target.c
··· 1166 1166 1167 1167 target_get_sess_cmd(&cmd->se_cmd, true); 1168 1168 1169 + cmd->se_cmd.tag = (__force u32)cmd->init_task_tag; 1169 1170 cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb, 1170 1171 GFP_KERNEL); 1172 + 1171 1173 if (cmd->sense_reason) { 1172 1174 if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) { 1173 1175 return iscsit_add_reject_cmd(cmd, ··· 1183 1181 if (cmd->sense_reason) 1184 1182 goto attach_cmd; 1185 1183 1186 - /* only used for printks or comparing with ->ref_task_tag */ 1187 - cmd->se_cmd.tag = (__force u32)cmd->init_task_tag; 1188 1184 cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd); 1189 1185 if (cmd->sense_reason) 1190 1186 goto attach_cmd;