/* jcs's openbsd hax -- local copy of OpenBSD's sys/dev/vscsi.c */
/*	$OpenBSD: vscsi.c,v 1.64 2025/09/16 12:18:10 hshoexer Exp $ */

/*
 * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
18
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/pool.h>
#include <sys/task.h>
#include <sys/ioctl.h>
#include <sys/event.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/vscsivar.h>

/*
 * Locks used to protect struct members and global data
 *	s	sc_state_mtx
 */
42int vscsi_match(struct device *, void *, void *);
43void vscsi_attach(struct device *, struct device *, void *);
44
45struct vscsi_ccb {
46 TAILQ_ENTRY(vscsi_ccb) ccb_entry;
47 int ccb_tag;
48 struct scsi_xfer *ccb_xs;
49 size_t ccb_datalen;
50};
51
52TAILQ_HEAD(vscsi_ccb_list, vscsi_ccb);
53
54enum vscsi_state {
55 VSCSI_S_CLOSED,
56 VSCSI_S_CONFIG,
57 VSCSI_S_RUNNING
58};
59
60struct vscsi_softc {
61 struct device sc_dev;
62 struct scsibus_softc *sc_scsibus;
63
64 struct mutex sc_state_mtx;
65 enum vscsi_state sc_state;
66 u_int sc_ref_count;
67 struct pool sc_ccb_pool;
68
69 struct scsi_iopool sc_iopool;
70
71 struct vscsi_ccb_list sc_ccb_i2t; /* [s] */
72 struct vscsi_ccb_list sc_ccb_t2i;
73 int sc_ccb_tag;
74 struct mutex sc_poll_mtx;
75 struct rwlock sc_ioc_lock;
76
77 struct klist sc_klist; /* [s] */
78};
79
80#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
81#define DEV2SC(_d) ((struct vscsi_softc *)device_lookup(&vscsi_cd, minor(_d)))
82
83const struct cfattach vscsi_ca = {
84 sizeof(struct vscsi_softc),
85 vscsi_match,
86 vscsi_attach
87};
88
89struct cfdriver vscsi_cd = {
90 NULL,
91 "vscsi",
92 DV_DULL,
93 CD_COCOVM
94};
95
96void vscsi_cmd(struct scsi_xfer *);
97int vscsi_probe(struct scsi_link *);
98void vscsi_free(struct scsi_link *);
99
100const struct scsi_adapter vscsi_switch = {
101 vscsi_cmd, NULL, vscsi_probe, vscsi_free, NULL
102};
103
104int vscsi_i2t(struct vscsi_softc *, struct vscsi_ioc_i2t *);
105int vscsi_data(struct vscsi_softc *, struct vscsi_ioc_data *, int);
106int vscsi_t2i(struct vscsi_softc *, struct vscsi_ioc_t2i *);
107int vscsi_devevent(struct vscsi_softc *, u_long,
108 struct vscsi_ioc_devevent *);
109void vscsi_devevent_task(void *);
110void vscsi_done(struct vscsi_softc *, struct vscsi_ccb *);
111
112void * vscsi_ccb_get(void *);
113void vscsi_ccb_put(void *, void *);
114
115void filt_vscsidetach(struct knote *);
116int filt_vscsiread(struct knote *, long);
117int filt_vscsimodify(struct kevent *, struct knote *);
118int filt_vscsiprocess(struct knote *, struct kevent *);
119
120const struct filterops vscsi_filtops = {
121 .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
122 .f_attach = NULL,
123 .f_detach = filt_vscsidetach,
124 .f_event = filt_vscsiread,
125 .f_modify = filt_vscsimodify,
126 .f_process = filt_vscsiprocess,
127};
128
129
130int
131vscsi_match(struct device *parent, void *match, void *aux)
132{
133 return (1);
134}
135
136void
137vscsi_attach(struct device *parent, struct device *self, void *aux)
138{
139 struct vscsi_softc *sc = (struct vscsi_softc *)self;
140 struct scsibus_attach_args saa;
141
142 printf("\n");
143
144 mtx_init(&sc->sc_state_mtx, IPL_MPFLOOR);
145 sc->sc_state = VSCSI_S_CLOSED;
146
147 TAILQ_INIT(&sc->sc_ccb_i2t);
148 TAILQ_INIT(&sc->sc_ccb_t2i);
149 mtx_init(&sc->sc_poll_mtx, IPL_BIO);
150 rw_init(&sc->sc_ioc_lock, "vscsiioc");
151 scsi_iopool_init(&sc->sc_iopool, sc, vscsi_ccb_get, vscsi_ccb_put);
152 klist_init_mutex(&sc->sc_klist, &sc->sc_state_mtx);
153
154 saa.saa_adapter = &vscsi_switch;
155 saa.saa_adapter_softc = sc;
156 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
157 saa.saa_adapter_buswidth = 256;
158 saa.saa_luns = 8;
159 saa.saa_openings = 16;
160 saa.saa_pool = &sc->sc_iopool;
161 saa.saa_quirks = saa.saa_flags = 0;
162 saa.saa_wwpn = saa.saa_wwnn = 0;
163
164 sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
165 &saa, scsiprint);
166}
167
168void
169vscsi_cmd(struct scsi_xfer *xs)
170{
171 struct scsi_link *link = xs->sc_link;
172 struct vscsi_softc *sc = link->bus->sb_adapter_softc;
173 struct vscsi_ccb *ccb = xs->io;
174 int polled = ISSET(xs->flags, SCSI_POLL);
175 int running = 0;
176
177 if (ISSET(xs->flags, SCSI_POLL) && ISSET(xs->flags, SCSI_NOSLEEP)) {
178 printf("%s: POLL && NOSLEEP for 0x%02x\n", DEVNAME(sc),
179 xs->cmd.opcode);
180 xs->error = XS_DRIVER_STUFFUP;
181 scsi_done(xs);
182 return;
183 }
184
185 ccb->ccb_xs = xs;
186
187 mtx_enter(&sc->sc_state_mtx);
188 if (sc->sc_state == VSCSI_S_RUNNING) {
189 running = 1;
190 TAILQ_INSERT_TAIL(&sc->sc_ccb_i2t, ccb, ccb_entry);
191 }
192 knote_locked(&sc->sc_klist, 0);
193 mtx_leave(&sc->sc_state_mtx);
194
195 if (!running) {
196 xs->error = XS_DRIVER_STUFFUP;
197 scsi_done(xs);
198 return;
199 }
200
201 if (polled) {
202 mtx_enter(&sc->sc_poll_mtx);
203 while (ccb->ccb_xs != NULL)
204 msleep_nsec(ccb, &sc->sc_poll_mtx, PRIBIO, "vscsipoll",
205 INFSLP);
206 mtx_leave(&sc->sc_poll_mtx);
207 scsi_done(xs);
208 }
209}
210
211void
212vscsi_done(struct vscsi_softc *sc, struct vscsi_ccb *ccb)
213{
214 struct scsi_xfer *xs = ccb->ccb_xs;
215
216 if (ISSET(xs->flags, SCSI_POLL)) {
217 mtx_enter(&sc->sc_poll_mtx);
218 ccb->ccb_xs = NULL;
219 wakeup(ccb);
220 mtx_leave(&sc->sc_poll_mtx);
221 } else
222 scsi_done(xs);
223}
224
225int
226vscsi_probe(struct scsi_link *link)
227{
228 struct vscsi_softc *sc = link->bus->sb_adapter_softc;
229 int rv = 0;
230
231 mtx_enter(&sc->sc_state_mtx);
232 if (sc->sc_state == VSCSI_S_RUNNING)
233 sc->sc_ref_count++;
234 else
235 rv = ENXIO;
236 mtx_leave(&sc->sc_state_mtx);
237
238 return (rv);
239}
240
241void
242vscsi_free(struct scsi_link *link)
243{
244 struct vscsi_softc *sc = link->bus->sb_adapter_softc;
245
246 mtx_enter(&sc->sc_state_mtx);
247 sc->sc_ref_count--;
248 if (sc->sc_state != VSCSI_S_RUNNING && sc->sc_ref_count == 0)
249 wakeup(&sc->sc_ref_count);
250 mtx_leave(&sc->sc_state_mtx);
251}
252
253int
254vscsiopen(dev_t dev, int flags, int mode, struct proc *p)
255{
256 struct vscsi_softc *sc = DEV2SC(dev);
257 enum vscsi_state state = VSCSI_S_RUNNING;
258 int rv = 0;
259
260 if (sc == NULL)
261 return (ENXIO);
262
263 mtx_enter(&sc->sc_state_mtx);
264 if (sc->sc_state != VSCSI_S_CLOSED)
265 rv = EBUSY;
266 else
267 sc->sc_state = VSCSI_S_CONFIG;
268 mtx_leave(&sc->sc_state_mtx);
269
270 if (rv != 0) {
271 device_unref(&sc->sc_dev);
272 return (rv);
273 }
274
275 pool_init(&sc->sc_ccb_pool, sizeof(struct vscsi_ccb), 0, IPL_BIO, 0,
276 "vscsiccb", NULL);
277
278 /* we need to guarantee some ccbs will be available for the iopool */
279 rv = pool_prime(&sc->sc_ccb_pool, 8);
280 if (rv != 0) {
281 pool_destroy(&sc->sc_ccb_pool);
282 state = VSCSI_S_CLOSED;
283 }
284
285 /* commit changes */
286 mtx_enter(&sc->sc_state_mtx);
287 sc->sc_state = state;
288 mtx_leave(&sc->sc_state_mtx);
289
290 device_unref(&sc->sc_dev);
291 return (rv);
292}
293
294int
295vscsiioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
296{
297 struct vscsi_softc *sc = DEV2SC(dev);
298 int read = 0;
299 int err = 0;
300
301 if (sc == NULL)
302 return (ENXIO);
303
304 rw_enter_write(&sc->sc_ioc_lock);
305
306 switch (cmd) {
307 case VSCSI_I2T:
308 err = vscsi_i2t(sc, (struct vscsi_ioc_i2t *)addr);
309 break;
310
311 case VSCSI_DATA_READ:
312 read = 1;
313 case VSCSI_DATA_WRITE:
314 err = vscsi_data(sc, (struct vscsi_ioc_data *)addr, read);
315 break;
316
317 case VSCSI_T2I:
318 err = vscsi_t2i(sc, (struct vscsi_ioc_t2i *)addr);
319 break;
320
321 case VSCSI_REQPROBE:
322 case VSCSI_REQDETACH:
323 err = vscsi_devevent(sc, cmd,
324 (struct vscsi_ioc_devevent *)addr);
325 break;
326
327 default:
328 err = ENOTTY;
329 break;
330 }
331
332 rw_exit_write(&sc->sc_ioc_lock);
333
334 device_unref(&sc->sc_dev);
335 return (err);
336}
337
338int
339vscsi_i2t(struct vscsi_softc *sc, struct vscsi_ioc_i2t *i2t)
340{
341 struct vscsi_ccb *ccb;
342 struct scsi_xfer *xs;
343 struct scsi_link *link;
344
345 mtx_enter(&sc->sc_state_mtx);
346 ccb = TAILQ_FIRST(&sc->sc_ccb_i2t);
347 if (ccb != NULL)
348 TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
349 mtx_leave(&sc->sc_state_mtx);
350
351 if (ccb == NULL)
352 return (EAGAIN);
353
354 xs = ccb->ccb_xs;
355 link = xs->sc_link;
356
357 i2t->tag = ccb->ccb_tag;
358 i2t->target = link->target;
359 i2t->lun = link->lun;
360 memcpy(&i2t->cmd, &xs->cmd, xs->cmdlen);
361 i2t->cmdlen = xs->cmdlen;
362 i2t->datalen = xs->datalen;
363
364 switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
365 case SCSI_DATA_IN:
366 i2t->direction = VSCSI_DIR_READ;
367 break;
368 case SCSI_DATA_OUT:
369 i2t->direction = VSCSI_DIR_WRITE;
370 break;
371 default:
372 i2t->direction = VSCSI_DIR_NONE;
373 break;
374 }
375
376 TAILQ_INSERT_TAIL(&sc->sc_ccb_t2i, ccb, ccb_entry);
377
378 return (0);
379}
380
381int
382vscsi_data(struct vscsi_softc *sc, struct vscsi_ioc_data *data, int read)
383{
384 struct vscsi_ccb *ccb;
385 struct scsi_xfer *xs;
386 int xsread;
387 u_int8_t *buf;
388 int rv = EINVAL;
389
390 TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
391 if (ccb->ccb_tag == data->tag)
392 break;
393 }
394 if (ccb == NULL)
395 return (EFAULT);
396
397 xs = ccb->ccb_xs;
398
399 if (data->datalen > xs->datalen - ccb->ccb_datalen)
400 return (ENOMEM);
401
402 switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
403 case SCSI_DATA_IN:
404 xsread = 1;
405 break;
406 case SCSI_DATA_OUT:
407 xsread = 0;
408 break;
409 default:
410 return (EINVAL);
411 }
412
413 if (read != xsread)
414 return (EINVAL);
415
416 buf = xs->data;
417 buf += ccb->ccb_datalen;
418
419 if (read)
420 rv = copyin(data->data, buf, data->datalen);
421 else
422 rv = copyout(buf, data->data, data->datalen);
423
424 if (rv == 0)
425 ccb->ccb_datalen += data->datalen;
426
427 return (rv);
428}
429
430int
431vscsi_t2i(struct vscsi_softc *sc, struct vscsi_ioc_t2i *t2i)
432{
433 struct vscsi_ccb *ccb;
434 struct scsi_xfer *xs;
435 int rv = 0;
436
437 TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
438 if (ccb->ccb_tag == t2i->tag)
439 break;
440 }
441 if (ccb == NULL)
442 return (EFAULT);
443
444 TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);
445
446 xs = ccb->ccb_xs;
447
448 xs->resid = xs->datalen - ccb->ccb_datalen;
449 xs->status = SCSI_OK;
450
451 switch (t2i->status) {
452 case VSCSI_STAT_DONE:
453 xs->error = XS_NOERROR;
454 break;
455 case VSCSI_STAT_SENSE:
456 xs->error = XS_SENSE;
457 memcpy(&xs->sense, &t2i->sense, sizeof(xs->sense));
458 break;
459 case VSCSI_STAT_RESET:
460 xs->error = XS_RESET;
461 break;
462 case VSCSI_STAT_ERR:
463 default:
464 xs->error = XS_DRIVER_STUFFUP;
465 break;
466 }
467
468 vscsi_done(sc, ccb);
469
470 return (rv);
471}
472
473struct vscsi_devevent_task {
474 struct vscsi_softc *sc;
475 struct task t;
476 struct vscsi_ioc_devevent de;
477 u_long cmd;
478};
479
480int
481vscsi_devevent(struct vscsi_softc *sc, u_long cmd,
482 struct vscsi_ioc_devevent *de)
483{
484 struct vscsi_devevent_task *dt;
485
486 dt = malloc(sizeof(*dt), M_TEMP, M_WAITOK | M_CANFAIL);
487 if (dt == NULL)
488 return (ENOMEM);
489
490 task_set(&dt->t, vscsi_devevent_task, dt);
491 dt->sc = sc;
492 dt->de = *de;
493 dt->cmd = cmd;
494
495 device_ref(&sc->sc_dev);
496 task_add(systq, &dt->t);
497
498 return (0);
499}
500
501void
502vscsi_devevent_task(void *xdt)
503{
504 struct vscsi_devevent_task *dt = xdt;
505 struct vscsi_softc *sc = dt->sc;
506 int state;
507
508 mtx_enter(&sc->sc_state_mtx);
509 state = sc->sc_state;
510 mtx_leave(&sc->sc_state_mtx);
511
512 if (state != VSCSI_S_RUNNING)
513 goto gone;
514
515 switch (dt->cmd) {
516 case VSCSI_REQPROBE:
517 scsi_probe(sc->sc_scsibus, dt->de.target, dt->de.lun);
518 break;
519 case VSCSI_REQDETACH:
520 scsi_detach(sc->sc_scsibus, dt->de.target, dt->de.lun,
521 DETACH_FORCE);
522 break;
523#ifdef DIAGNOSTIC
524 default:
525 panic("unexpected vscsi_devevent cmd");
526 /* NOTREACHED */
527#endif
528 }
529
530gone:
531 device_unref(&sc->sc_dev);
532
533 free(dt, M_TEMP, sizeof(*dt));
534}
535
536int
537vscsikqfilter(dev_t dev, struct knote *kn)
538{
539 struct vscsi_softc *sc = DEV2SC(dev);
540
541 if (sc == NULL)
542 return (ENXIO);
543
544 switch (kn->kn_filter) {
545 case EVFILT_READ:
546 kn->kn_fop = &vscsi_filtops;
547 break;
548 default:
549 device_unref(&sc->sc_dev);
550 return (EINVAL);
551 }
552
553 kn->kn_hook = sc;
554 klist_insert(&sc->sc_klist, kn);
555
556 /* device ref is given to the knote in the klist */
557
558 return (0);
559}
560
561void
562filt_vscsidetach(struct knote *kn)
563{
564 struct vscsi_softc *sc = kn->kn_hook;
565
566 klist_remove(&sc->sc_klist, kn);
567 device_unref(&sc->sc_dev);
568}
569
570int
571filt_vscsiread(struct knote *kn, long hint)
572{
573 struct vscsi_softc *sc = kn->kn_hook;
574
575 return (!TAILQ_EMPTY(&sc->sc_ccb_i2t));
576}
577
578int
579filt_vscsimodify(struct kevent *kev, struct knote *kn)
580{
581 struct vscsi_softc *sc = kn->kn_hook;
582 int active;
583
584 mtx_enter(&sc->sc_state_mtx);
585 active = knote_modify(kev, kn);
586 mtx_leave(&sc->sc_state_mtx);
587
588 return (active);
589}
590
591int
592filt_vscsiprocess(struct knote *kn, struct kevent *kev)
593{
594 struct vscsi_softc *sc = kn->kn_hook;
595 int active;
596
597 mtx_enter(&sc->sc_state_mtx);
598 active = knote_process(kn, kev);
599 mtx_leave(&sc->sc_state_mtx);
600
601 return (active);
602}
603
604int
605vscsiclose(dev_t dev, int flags, int mode, struct proc *p)
606{
607 struct vscsi_softc *sc = DEV2SC(dev);
608 struct vscsi_ccb *ccb;
609
610 if (sc == NULL)
611 return (ENXIO);
612
613 mtx_enter(&sc->sc_state_mtx);
614 KASSERT(sc->sc_state == VSCSI_S_RUNNING);
615 sc->sc_state = VSCSI_S_CONFIG;
616 mtx_leave(&sc->sc_state_mtx);
617
618 scsi_activate(sc->sc_scsibus, -1, -1, DVACT_DEACTIVATE);
619
620 while ((ccb = TAILQ_FIRST(&sc->sc_ccb_t2i)) != NULL) {
621 TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);
622 ccb->ccb_xs->error = XS_RESET;
623 vscsi_done(sc, ccb);
624 }
625
626 while ((ccb = TAILQ_FIRST(&sc->sc_ccb_i2t)) != NULL) {
627 TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
628 ccb->ccb_xs->error = XS_RESET;
629 vscsi_done(sc, ccb);
630 }
631
632 scsi_req_detach(sc->sc_scsibus, -1, -1, DETACH_FORCE);
633
634 mtx_enter(&sc->sc_state_mtx);
635 while (sc->sc_ref_count > 0) {
636 msleep_nsec(&sc->sc_ref_count, &sc->sc_state_mtx,
637 PRIBIO, "vscsiref", INFSLP);
638 }
639 mtx_leave(&sc->sc_state_mtx);
640
641 pool_destroy(&sc->sc_ccb_pool);
642
643 mtx_enter(&sc->sc_state_mtx);
644 sc->sc_state = VSCSI_S_CLOSED;
645 mtx_leave(&sc->sc_state_mtx);
646
647 device_unref(&sc->sc_dev);
648 return (0);
649}
650
651void *
652vscsi_ccb_get(void *cookie)
653{
654 struct vscsi_softc *sc = cookie;
655 struct vscsi_ccb *ccb = NULL;
656
657 ccb = pool_get(&sc->sc_ccb_pool, PR_NOWAIT);
658 if (ccb != NULL) {
659 ccb->ccb_tag = sc->sc_ccb_tag++;
660 ccb->ccb_datalen = 0;
661 }
662
663 return (ccb);
664}
665
666void
667vscsi_ccb_put(void *cookie, void *io)
668{
669 struct vscsi_softc *sc = cookie;
670 struct vscsi_ccb *ccb = io;
671
672 pool_put(&sc->sc_ccb_pool, ccb);
673}