// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is...
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Module Name:
 *  commctrl.c
 *
 * Abstract: Contains all routines for control of the AFA comm layer
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

# define AAC_DEBUG_PREAMBLE	KERN_INFO
# define AAC_DEBUG_POSTAMBLE
/**
 * ioctl_send_fib - send a FIB from userspace
 * @dev: adapter being processed
 * @arg: arguments to the ioctl call
 *
 * This routine sends a fib to the adapter on behalf of a user level
 * program.
 */
static int ioctl_send_fib(struct aac_dev *dev, void __user *arg)
{
	struct hw_fib *kfib;
	struct fib *fibptr;
	struct hw_fib *hw_fib = NULL;
	dma_addr_t hw_fib_pa = 0;
	unsigned int size, osize;
	int retval;

	if (dev->in_reset) {
		return -EBUSY;
	}
	fibptr = aac_fib_alloc(dev);
	if (fibptr == NULL) {
		return -ENOMEM;
	}

	kfib = fibptr->hw_fib_va;
	/*
	 * First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 * Since we copy based on the fib header size, make sure that we
	 * will not overrun the buffer when we copy the memory. Return
	 * an error if we would.
	 */
	osize = size = le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
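	/*
	 * Oversized (but still <= 2K) FIBs are staged in a temporary
	 * coherent buffer that is swapped into the fib below and
	 * restored in the cleanup path.
	 */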
	if (size > dev->max_fib_size) {
		dma_addr_t daddr;

		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}

		kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr,
					  GFP_KERNEL);
		if (!kfib) {
			retval = -ENOMEM;
			goto cleanup;
		}

		/* Hijack the hw_fib */
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib;
		fibptr->hw_fib_pa = daddr;
		memset(((char *)kfib) + dev->max_fib_size, 0,
		       size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	/* Sanity check the second copy */
	if ((osize != le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr))
		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
		retval = -EINVAL;
		goto cleanup;
	}

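	/*
	 * TakeABreakPt is a debug command: it is not queued to the
	 * adapter as a fib, we only signal the adapter via
	 * aac_adapter_interrupt().
	 */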
	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		/*
		 * Since we didn't really send a fib, zero out the state
		 * to allow cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size), FsaNormal,
				1, 1, NULL, NULL);
		if (retval) {
			goto cleanup;
		}
		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 * Make sure that the size returned by the adapter (which includes
	 * the header) is less than or equal to the size of a fib, so we
	 * don't corrupt application data. Then copy that size to the user
	 * buffer. (Don't try to add the header information again, since it
	 * was already included by the adapter.)
	 */

	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		dma_free_coherent(&dev->pdev->dev, size, kfib,
				  fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return retval;
}

/**
 * open_getadapter_fib - Get the next fib
 * @dev: adapter being processed
 * @arg: arguments to the open call
 *
 * This routine will get the next Fib, if available, from the AdapterFibContext
 * passed in from the user.
 */
static int open_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head *entry;
		struct aac_fib_context *context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 * Yes yes, I know this could be an index, but we have a
		 * better guarantee of uniqueness for the locked loop below.
		 * Without the aid of a persistent history, this also helps
		 * reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 * Initialize the mutex used to wait for the next AIF.
		 */
		init_completion(&fibctx->completion);
		fibctx->wait = 0;
		/*
		 * Initialize the fibs and set the count of fibs on
		 * the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
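		/* Stamp the context with its creation time in seconds. */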
		fibctx->jiffies = jiffies/HZ;
		/*
		 * Now add this context onto the adapter's
		 * AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
				 sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}

/**
 * next_getadapter_fib - get the next fib
 * @dev: adapter to use
 * @arg: ioctl argument
 *
 * This routine will get the next Fib, if available, from the AdapterFibContext
 * passed in from the user.
 */
static int next_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head *entry;
	unsigned long flags;

	if (copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 * If there are no fibs to send back, then either wait or return
	 * -EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev,
						  "%s", dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if (wait_for_completion_interruptible(&fibctx->completion) < 0) {
				status = -ERESTARTSYS;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	fibctx->jiffies = jiffies/HZ;
	return status;
}

int aac_close_fib_context(struct aac_dev *dev, struct aac_fib_context *fibctx)
{
	struct fib *fib;

	/*
	 * First free any FIBs that have not been consumed.
	 */
	while (!list_empty(&fibctx->fib_list)) {
		struct list_head *entry;
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);
		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
	}
	/*
	 * Remove the Context from the AdapterFibContext List
	 */
	list_del(&fibctx->next);
	/*
	 * Invalidate context
	 */
	fibctx->type = 0;
	/*
	 * Free the space occupied by the Context
	 */
	kfree(fibctx);
	return 0;
}

/**
 * close_getadapter_fib - close down user fib context
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine will close down the fibctx passed in from the user.
 */

static int close_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;
	unsigned long flags;
	struct list_head *entry;

	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address.  Hold fib_lock across the
	 * search and teardown so the context cannot be freed underneath us.
	 */

	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the fibctx from the input parameters
		 */
		if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}

	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return 0; /* Already gone */
	}

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return -EINVAL;
	}
	status = aac_close_fib_context(dev, fibctx);
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	return status;
}

/**
 * check_revision - report the driver version to userspace
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine returns the driver version.
 * Under Linux, there have been no version incompatibilities, so this is
 * simple!
 */

static int check_revision(struct aac_dev *dev, void __user *arg)
{
	struct revision response;
	char *driver_version = aac_driver_version;
	u32 version;

	response.compat = 1;
	version = (simple_strtol(driver_version,
				&driver_version, 10) << 24) | 0x00000400;
	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
	version += simple_strtol(driver_version + 1, NULL, 10);
	response.version = cpu_to_le32(version);
# ifdef AAC_DRIVER_BUILD
	response.build = cpu_to_le32(AAC_DRIVER_BUILD);
# else
	response.build = cpu_to_le32(9999);
# endif

	if (copy_to_user(arg, &response, sizeof(response)))
		return -EFAULT;
	return 0;
}

/**
 * aac_send_raw_srb - send a raw SRB from userspace
 * @dev: adapter being processed
 * @arg: arguments to the send call
 */
static int aac_send_raw_srb(struct aac_dev *dev, void __user *arg)
{
	struct fib *srbfib;
	int status;
	struct aac_srb *srbcmd = NULL;
	struct aac_hba_cmd_req *hbacmd = NULL;
	struct user_aac_srb *user_srbcmd = NULL;
	struct user_aac_srb __user *user_srb = arg;
	struct aac_srb_reply __user *user_reply;
	u32 chn;
	u32 fibsize = 0;
	u32 flags = 0;
	s32 rcode = 0;
	u32 data_dir;
	void __user *sg_user[HBA_MAX_SG_EMBEDDED];
	void *sg_list[HBA_MAX_SG_EMBEDDED];
	u32 sg_count[HBA_MAX_SG_EMBEDDED];
	u32 sg_indx = 0;
	u32 byte_count = 0;
	u32 actual_fibsize64, actual_fibsize = 0;
	int i;
	int is_native_device;
	u64 address;

	if (dev->in_reset) {
		dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
		return -EBUSY;
	}
	if (!capable(CAP_SYS_ADMIN)) {
		dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
		return -EPERM;
	}
	/*
	 * Allocate and initialize a Fib then setup a SRB command
	 */
	if (!(srbfib = aac_fib_alloc(dev))) {
		return -ENOMEM;
	}

	memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
	if (copy_from_user(&fibsize, &user_srb->count, sizeof(u32))) {
		dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
	    (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
		rcode = -EINVAL;
		goto cleanup;
	}

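	/*
	 * The bounds check above guarantees the request, including its
	 * embedded scatter/gather list, fits in a single FIB payload,
	 * so the whole thing can be pulled over in one copy.
	 */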
	user_srbcmd = memdup_user(user_srb, fibsize);
	if (IS_ERR(user_srbcmd)) {
		rcode = PTR_ERR(user_srbcmd);
		user_srbcmd = NULL;
		goto cleanup;
	}

	flags = user_srbcmd->flags; /* from user in cpu order */
	switch (flags & (SRB_DataIn | SRB_DataOut)) {
	case SRB_DataOut:
		data_dir = DMA_TO_DEVICE;
		break;
	case (SRB_DataIn | SRB_DataOut):
		data_dir = DMA_BIDIRECTIONAL;
		break;
	case SRB_DataIn:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		data_dir = DMA_NONE;
	}
	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
		dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
			user_srbcmd->sg.count));
		rcode = -EINVAL;
		goto cleanup;
	}
	if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
		dprintk((KERN_DEBUG"aacraid:SG with no direction specified\n"));
		rcode = -EINVAL;
		goto cleanup;
	}
	actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
		((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
	actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
		(sizeof(struct sgentry64) - sizeof(struct sgentry));
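	/*
	 * The user can hand us either 32-bit or 64-bit SG entries;
	 * whichever candidate size matches the declared fibsize tells
	 * us which format was used.
	 */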
	/* User made a mistake - should not continue */
	if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
		dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
		  "Raw SRB command calculated fibsize=%lu;%lu "
		  "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
		  "issued fibsize=%d\n",
		  actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
		  sizeof(struct aac_srb), sizeof(struct sgentry),
		  sizeof(struct sgentry64), fibsize));
		rcode = -EINVAL;
		goto cleanup;
	}

	chn = user_srbcmd->channel;
	if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
		dev->hba_map[chn][user_srbcmd->id].devtype ==
		AAC_DEVTYPE_NATIVE_RAW) {
		is_native_device = 1;
		hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va;
		memset(hbacmd, 0, 96);	/* sizeof(*hbacmd) is not necessary */

		/* iu_type is a parameter of aac_hba_send */
		switch (data_dir) {
		case DMA_TO_DEVICE:
			hbacmd->byte1 = 2;
			break;
		case DMA_FROM_DEVICE:
		case DMA_BIDIRECTIONAL:
			hbacmd->byte1 = 1;
			break;
		case DMA_NONE:
		default:
			break;
		}
		hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
		hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;

		/*
		 * we fill in reply_qid later in aac_src_deliver_message
		 * we fill in iu_type, request_id later in aac_hba_send
		 * we fill in emb_data_desc_count, data_length later
		 * in sg list build
		 */

		memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb));

		address = (u64)srbfib->hw_error_pa;
		hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
		hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
		hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
		hbacmd->emb_data_desc_count =
			cpu_to_le32(user_srbcmd->sg.count);
		srbfib->hbacmd_size = 64 +
			user_srbcmd->sg.count * sizeof(struct aac_hba_sgl);

	} else {
		is_native_device = 0;
		aac_fib_init(srbfib);

		/* raw_srb FIB is not FastResponseCapable */
		srbfib->hw_fib_va->header.XferState &=
			~cpu_to_le32(FastResponseCapable);

		srbcmd = (struct aac_srb *) fib_data(srbfib);

		// Fix up srb for endian and force some values

		srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);	// Force this
		srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
		srbcmd->id = cpu_to_le32(user_srbcmd->id);
		srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
		srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
		srbcmd->flags = cpu_to_le32(flags);
		srbcmd->retry_limit = 0; // Obsolete parameter
		srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
		memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
	}

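	/*
	 * Build the scatter/gather list.  Three layouts are possible:
	 * native HBA SGEs for AAC_DEVTYPE_NATIVE_RAW devices, 64-bit
	 * sgmap64 entries when the adapter supports 64-bit host
	 * addresses, and legacy 32-bit sgmap entries otherwise.
	 */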
	byte_count = 0;
	if (is_native_device) {
		struct user_sgmap *usg32 = &user_srbcmd->sg;
		struct user_sgmap64 *usg64 =
			(struct user_sgmap64 *)&user_srbcmd->sg;

		for (i = 0; i < usg32->count; i++) {
			void *p;
			u64 addr;

			sg_count[i] = (actual_fibsize64 == fibsize) ?
				usg64->sg[i].count : usg32->sg[i].count;
			if (sg_count[i] >
				(dev->scsi_host_ptr->max_sectors << 9)) {
				pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
					i, sg_count[i],
					dev->scsi_host_ptr->max_sectors << 9);
				rcode = -EINVAL;
				goto cleanup;
			}

			p = kmalloc(sg_count[i], GFP_KERNEL);
			if (!p) {
				rcode = -ENOMEM;
				goto cleanup;
			}

			if (actual_fibsize64 == fibsize) {
				addr = (u64)usg64->sg[i].addr[0];
				addr += ((u64)usg64->sg[i].addr[1]) << 32;
			} else {
				addr = (u64)usg32->sg[i].addr;
			}

			sg_user[i] = (void __user *)(uintptr_t)addr;
			sg_list[i] = p; // save so we can clean up later
			sg_indx = i;

			if (flags & SRB_DataOut) {
				if (copy_from_user(p, sg_user[i],
						   sg_count[i])) {
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			addr = pci_map_single(dev->pdev, p, sg_count[i],
					      data_dir);
			hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
			hbacmd->sge[i].addr_lo = cpu_to_le32(
						(u32)(addr & 0xffffffff));
			hbacmd->sge[i].len = cpu_to_le32(sg_count[i]);
			hbacmd->sge[i].flags = 0;
			byte_count += sg_count[i];
		}

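		/* flag the last element of the embedded SG list */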
		if (usg32->count > 0)	/* embedded sglist */
			hbacmd->sge[usg32->count-1].flags =
				cpu_to_le32(0x40000000);
		hbacmd->data_length = cpu_to_le32(byte_count);

		status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib,
				      NULL, NULL);

	} else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
		struct user_sgmap64 *upsg = (struct user_sgmap64 *)&user_srbcmd->sg;
		struct sgmap64 *psg = (struct sgmap64 *)&srbcmd->sg;

		/*
		 * This should also catch if user used the 32 bit sgmap
		 */
		if (actual_fibsize64 == fibsize) {
			actual_fibsize = actual_fibsize64;
			for (i = 0; i < upsg->count; i++) {
				u64 addr;
				void *p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				      AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}

				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)upsg->sg[i].addr[0];
				addr += ((u64)upsg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)(uintptr_t)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
			struct user_sgmap *usg;

			usg = kmemdup(upsg,
				      actual_fibsize - sizeof(struct aac_srb)
				      + sizeof(struct sgmap), GFP_KERNEL);
			if (!usg) {
				dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
				rcode = -ENOMEM;
				goto cleanup;
			}
			actual_fibsize = actual_fibsize64;

			for (i = 0; i < usg->count; i++) {
				u64 addr;
				void *p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				      AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					kfree(usg);
					rcode = -EINVAL;
					goto cleanup;
				}

				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						sg_count[i], i, usg->count));
					kfree(usg);
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						kfree(usg);
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
			kfree(usg);
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand64, srbfib,
				      actual_fibsize, FsaNormal, 1, 1,
				      NULL, NULL);
	} else {
		struct user_sgmap *upsg = &user_srbcmd->sg;
		struct sgmap *psg = &srbcmd->sg;

		if (actual_fibsize64 == fibsize) {
			struct user_sgmap64 *usg = (struct user_sgmap64 *)upsg;

			for (i = 0; i < upsg->count; i++) {
				uintptr_t addr;
				void *p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				      AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						sg_count[i], i, usg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)usg->sg[i].addr[0];
				addr += ((u64)usg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      usg->sg[i].count,
						      data_dir);

				psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
			for (i = 0; i < upsg->count; i++) {
				dma_addr_t addr;
				void *p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				      AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr = cpu_to_le32(addr);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand, srbfib,
				      actual_fibsize, FsaNormal, 1, 1,
				      NULL, NULL);
	}

	if (status == -ERESTARTSYS) {
		rcode = -ERESTARTSYS;
		goto cleanup;
	}

	if (status != 0) {
		dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
		rcode = -ENXIO;
		goto cleanup;
	}

	if (flags & SRB_DataIn) {
		for (i = 0; i <= sg_indx; i++) {
			if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {
				dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
				rcode = -EFAULT;
				goto cleanup;
			}
		}
	}

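	/* The reply structure lives immediately after the request in
	 * the user's ioctl buffer. */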
	user_reply = arg + fibsize;
	if (is_native_device) {
		struct aac_hba_resp *err =
			&((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
		struct aac_srb_reply reply;

		memset(&reply, 0, sizeof(reply));
		reply.status = ST_OK;
		if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
			/* fast response */
			reply.srb_status = SRB_STATUS_SUCCESS;
			reply.scsi_status = 0;
			reply.data_xfer_length = byte_count;
			reply.sense_data_size = 0;
			memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
		} else {
			reply.srb_status = err->service_response;
			reply.scsi_status = err->status;
			reply.data_xfer_length = byte_count -
				le32_to_cpu(err->residual_count);
			reply.sense_data_size = err->sense_response_data_len;
			memcpy(reply.sense_data, err->sense_response_buf,
			       AAC_SENSE_BUFFERSIZE);
		}
		if (copy_to_user(user_reply, &reply,
				 sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	} else {
		struct aac_srb_reply *reply;

		reply = (struct aac_srb_reply *) fib_data(srbfib);
		if (copy_to_user(user_reply, reply,
				 sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	}

cleanup:
	kfree(user_srbcmd);
	if (rcode != -ERESTARTSYS) {
		for (i = 0; i <= sg_indx; i++)
			kfree(sg_list[i]);
		aac_fib_complete(srbfib);
		aac_fib_free(srbfib);
	}

	return rcode;
}

struct aac_pci_info {
	u32 bus;
	u32 slot;
};

static int aac_get_pci_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_pci_info pci_info;

	pci_info.bus = dev->pdev->bus->number;
	pci_info.slot = PCI_SLOT(dev->pdev->devfn);

	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
		return -EFAULT;
	}
	return 0;
}

static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_hba_info hbainfo;

	memset(&hbainfo, 0, sizeof(hbainfo));
	hbainfo.adapter_number = (u8) dev->id;
	hbainfo.system_io_bus_number = dev->pdev->bus->number;
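	/* devfn packs the PCI slot into bits 7:3 and the function into
	 * bits 2:0. */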
	hbainfo.device_number = (dev->pdev->devfn >> 3);
	hbainfo.function_number = (dev->pdev->devfn & 0x0007);

	hbainfo.vendor_id = dev->pdev->vendor;
	hbainfo.device_id = dev->pdev->device;
	hbainfo.sub_vendor_id = dev->pdev->subsystem_vendor;
	hbainfo.sub_system_id = dev->pdev->subsystem_device;

	if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
		return -EFAULT;
	}

	return 0;
}

struct aac_reset_iop {
	u8	reset_type;
};

static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
{
	struct aac_reset_iop reset;
	int retval;

	if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
		return -EFAULT;

	dev->adapter_shutdown = 1;

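	/*
	 * adapter_shutdown is set, so new ioctls will be refused; drop
	 * ioctl_mutex across the (potentially long) reset so it is not
	 * held for the duration.
	 */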
	mutex_unlock(&dev->ioctl_mutex);
	retval = aac_reset_adapter(dev, 0, reset.reset_type);
	mutex_lock(&dev->ioctl_mutex);

	return retval;
}

int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
{
	int status;

	mutex_lock(&dev->ioctl_mutex);

	if (dev->adapter_shutdown) {
		status = -EACCES;
		goto cleanup;
	}

	/*
	 * HBA gets first crack
	 */

	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		goto cleanup;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
	case FSACTL_SENDFIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev, arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev, arg);
		break;
	case FSACTL_GET_HBA_INFO:
		status = aac_get_hba_info(dev, arg);
		break;
	case FSACTL_RESET_IOP:
		status = aac_send_reset_adapter(dev, arg);
		break;

	default:
		status = -ENOTTY;
		break;
	}

cleanup:
	mutex_unlock(&dev->ioctl_mutex);

	return status;
}
1101