Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: libsas: Use new workqueue to run sas event and disco event

Currently all libsas works are queued to the scsi host workqueue, including
sas event work posted by the LLDD and sas discovery work, and a sas hotplug
flow may be divided into several works. E.g. when libsas receives a
PORTE_BYTES_DMAED event, we currently process it in the following steps:

sas_form_port --- run in work in shost workq
sas_discover_domain --- run in another work in shost workq
...
sas_probe_devices --- run in new work in shost workq
We found that during hot-add of a device, libsas may need to run several
works in the same workqueue to add the device to the system. The process is
not atomic, so it may be interrupted by other sas event works, like
PHYE_LOSS_OF_SIGNAL.

This patch is a preparation for executing libsas sas events synchronously. We
need to use different workqueues to run sas event and disco event work.
Otherwise a work would be blocked waiting for another chained work in the
same workqueue.

Signed-off-by: Yijing Wang <wangyijing@huawei.com>
CC: John Garry <john.garry@huawei.com>
CC: Johannes Thumshirn <jthumshirn@suse.de>
CC: Ewan Milne <emilne@redhat.com>
CC: Christoph Hellwig <hch@lst.de>
CC: Tomas Henzl <thenzl@redhat.com>
CC: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jason Yan <yanaijie@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

Jason Yan and committed by
Martin K. Petersen
93bdbd06 8eea9dd8

+25 -4
+1 -1
drivers/scsi/libsas/sas_discover.c
··· 534 534 * workqueue, or known to be submitted from a context that is 535 535 * not racing against draining 536 536 */ 537 - scsi_queue_work(ha->core.shost, &sw->work); 537 + queue_work(ha->disco_q, &sw->work); 538 538 } 539 539 540 540 static void sas_chain_event(int event, unsigned long *pending,
+3 -3
drivers/scsi/libsas/sas_event.c
··· 40 40 if (list_empty(&sw->drain_node)) 41 41 list_add_tail(&sw->drain_node, &ha->defer_q); 42 42 } else 43 - rc = scsi_queue_work(ha->core.shost, &sw->work); 43 + rc = queue_work(ha->event_q, &sw->work); 44 44 45 45 return rc; 46 46 } ··· 61 61 62 62 void __sas_drain_work(struct sas_ha_struct *ha) 63 63 { 64 - struct workqueue_struct *wq = ha->core.shost->work_q; 65 64 struct sas_work *sw, *_sw; 66 65 int ret; 67 66 ··· 69 70 spin_lock_irq(&ha->lock); 70 71 spin_unlock_irq(&ha->lock); 71 72 72 - drain_workqueue(wq); 73 + drain_workqueue(ha->event_q); 74 + drain_workqueue(ha->disco_q); 73 75 74 76 spin_lock_irq(&ha->lock); 75 77 clear_bit(SAS_HA_DRAINING, &ha->state);
+18
drivers/scsi/libsas/sas_init.c
··· 110 110 111 111 int sas_register_ha(struct sas_ha_struct *sas_ha) 112 112 { 113 + char name[64]; 113 114 int error = 0; 114 115 115 116 mutex_init(&sas_ha->disco_mutex); ··· 144 143 goto Undo_ports; 145 144 } 146 145 146 + error = -ENOMEM; 147 + snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev)); 148 + sas_ha->event_q = create_singlethread_workqueue(name); 149 + if (!sas_ha->event_q) 150 + goto Undo_ports; 151 + 152 + snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev)); 153 + sas_ha->disco_q = create_singlethread_workqueue(name); 154 + if (!sas_ha->disco_q) 155 + goto Undo_event_q; 156 + 147 157 INIT_LIST_HEAD(&sas_ha->eh_done_q); 148 158 INIT_LIST_HEAD(&sas_ha->eh_ata_q); 149 159 150 160 return 0; 161 + 162 + Undo_event_q: 163 + destroy_workqueue(sas_ha->event_q); 151 164 Undo_ports: 152 165 sas_unregister_ports(sas_ha); 153 166 Undo_phys: ··· 191 176 mutex_lock(&sas_ha->drain_mutex); 192 177 __sas_drain_work(sas_ha); 193 178 mutex_unlock(&sas_ha->drain_mutex); 179 + 180 + destroy_workqueue(sas_ha->disco_q); 181 + destroy_workqueue(sas_ha->event_q); 194 182 195 183 return 0; 196 184 }
+3
include/scsi/libsas.h
··· 389 389 struct device *dev; /* should be set */ 390 390 struct module *lldd_module; /* should be set */ 391 391 392 + struct workqueue_struct *event_q; 393 + struct workqueue_struct *disco_q; 394 + 392 395 u8 *sas_addr; /* must be set */ 393 396 u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE]; 394 397