···503503 host_bcode = FC_ERROR;504504 goto err;505505 }506506- if (offset + len > fsp->data_len) {506506+ if (size_add(offset, len) > fsp->data_len) {507507 /* this should never happen */508508 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&509509 fc_frame_crc_check(fp))
+4-4
drivers/scsi/qla4xxx/ql4_os.c
···41044104 * The mid-level driver tries to ensure that queuecommand never gets41054105 * invoked concurrently with itself or the interrupt handler (although41064106 * the interrupt handler may call this routine as part of request-41074107- * completion handling). Unfortunely, it sometimes calls the scheduler41074107+ * completion handling). Unfortunately, it sometimes calls the scheduler41084108 * in interrupt context which is a big NO! NO!.41094109 **/41104110static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)···46474647 cmd = scsi_host_find_tag(ha->host, index);46484648 /*46494649 * We cannot just check if the index is valid,46504650- * becase if we are run from the scsi eh, then46504650+ * because if we are run from the scsi eh, then46514651 * the scsi/block layer is going to prevent46524652 * the tag from being released.46534653 */···49524952 /* Upon successful firmware/chip reset, re-initialize the adapter */49534953 if (status == QLA_SUCCESS) {49544954 /* For ISP-4xxx, force function 1 to always initialize49554955- * before function 3 to prevent both funcions from49554955+ * before function 3 to prevent both functions from49564956 * stepping on top of the other */49574957 if (is_qla40XX(ha) && (ha->mac_index == 3))49584958 ssleep(6);···69146914 struct ddb_entry *ddb_entry = NULL;6915691569166916 /* Create session object, with INVALID_ENTRY,69176917- * the targer_id would get set when we issue the login69176917+ * the target_id would get set when we issue the login69186918 */69196919 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,69206920 cmds_max, sizeof(struct ddb_entry),
+46-52
drivers/scsi/storvsc_drv.c
···14061406 }1407140714081408 /*14091409- * Our channel array is sparsley populated and we14091409+ * Our channel array could be sparsely populated and we14101410 * initiated I/O on a processor/hw-q that does not14111411 * currently have a designated channel. Fix this.14121412 * The strategy is simple:14131413- * I. Ensure NUMA locality14141414- * II. Distribute evenly (best effort)14131413+ * I. Prefer the channel associated with the current CPU14141414+ * II. Ensure NUMA locality14151415+ * III. Distribute evenly (best effort)14151416 */14171417+14181418+ /* Prefer the channel on the I/O issuing processor/hw-q */14191419+ if (cpumask_test_cpu(q_num, &stor_device->alloced_cpus))14201420+ return stor_device->stor_chns[q_num];1416142114171422 node_mask = cpumask_of_node(cpu_to_node(q_num));14181423
Try to use the channel on the current CPU15041504- */15051505- if (hv_get_avail_to_write_percent(15061506- &outgoing_channel->outbound)15071507- > ring_avail_percent_lowater)14761476+ /*14771477+ * Channel is busy, try to find a channel on the same NUMA node14781478+ */14791479+ node_mask = cpumask_of_node(cpu_to_node(q_num));14801480+ for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,14811481+ q_num + 1) {14821482+ if (!cpumask_test_cpu(tgt_cpu, node_mask))14831483+ continue;14841484+ channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);14851485+ if (!channel)14861486+ continue;14871487+ if (hv_get_avail_to_write_percent(&channel->outbound)14881488+ > ring_avail_percent_lowater) {14891489+ outgoing_channel = channel;15081490 goto found_channel;15091509-15101510- /*15111511- * If we reach here, all the channels on the current15121512- * NUMA node are busy. Try to find a channel in15131513- * other NUMA nodes15141514- */15151515- for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {15161516- if (cpumask_test_cpu(tgt_cpu, node_mask))15171517- continue;15181518- channel = READ_ONCE(15191519- stor_device->stor_chns[tgt_cpu]);15201520- if (channel == NULL)15211521- continue;15221522- if (hv_get_avail_to_write_percent(15231523- &channel->outbound)15241524- > ring_avail_percent_lowater) {15251525- outgoing_channel = channel;15261526- goto found_channel;15271527- }15281491 }15291492 }14931493+14941494+ /*14951495+ * If we reach here, all the channels on the current14961496+ * NUMA node are busy. 
Try to find a channel in14971497+ * all NUMA nodes14981498+ */14991499+ for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,15001500+ q_num + 1) {15011501+ channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);15021502+ if (!channel)15031503+ continue;15041504+ if (hv_get_avail_to_write_percent(&channel->outbound)15051505+ > ring_avail_percent_lowater) {15061506+ outgoing_channel = channel;15071507+ goto found_channel;15081508+ }15091509+ }15101510+ /*15111511+ * If we reach here, all the channels are busy. Use the15121512+ * original channel found.15131513+ */15301514 } else {15311515 spin_lock_irqsave(&stor_device->lock, flags);15321516 outgoing_channel = stor_device->stor_chns[q_num];