[SCSI] remove aic7xxx busyq

The aic7xxx driver has two spurious queues in its Linux glue code: the
busyq, which queues incoming commands to the driver, and the completeq,
which queues finished commands before sending them back to the mid-layer.

This patch just removes the busyq and makes the aic finally return the
correct status to get the mid-layer to manage its queueing, so a command
is either committed to the sequencer or returned to the mid-layer for
requeue.

Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>

authored by James Bottomley and committed by James Bottomley e4e360c3 fad01ef8

+206 -453
+205 -450
drivers/scsi/aic7xxx/aic7xxx_osm.c
··· 458 458 u_int); 459 459 static void ahc_linux_free_device(struct ahc_softc*, 460 460 struct ahc_linux_device*); 461 - static void ahc_linux_run_device_queue(struct ahc_softc*, 462 - struct ahc_linux_device*); 461 + static int ahc_linux_run_command(struct ahc_softc*, 462 + struct ahc_linux_device *, 463 + struct scsi_cmnd *); 463 464 static void ahc_linux_setup_tag_info_global(char *p); 464 465 static aic_option_callback_t ahc_linux_setup_tag_info; 465 466 static int aic7xxx_setup(char *s); 466 467 static int ahc_linux_next_unit(void); 467 - static void ahc_runq_tasklet(unsigned long data); 468 468 static struct ahc_cmd *ahc_linux_run_complete_queue(struct ahc_softc *ahc); 469 469 470 470 /********************************* Inlines ************************************/ 471 - static __inline void ahc_schedule_runq(struct ahc_softc *ahc); 472 471 static __inline struct ahc_linux_device* 473 472 ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, 474 473 u_int target, u_int lun, int alloc); 475 474 static __inline void ahc_schedule_completeq(struct ahc_softc *ahc); 476 - static __inline void ahc_linux_check_device_queue(struct ahc_softc *ahc, 477 - struct ahc_linux_device *dev); 478 - static __inline struct ahc_linux_device * 479 - ahc_linux_next_device_to_run(struct ahc_softc *ahc); 480 - static __inline void ahc_linux_run_device_queues(struct ahc_softc *ahc); 481 475 static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*); 482 476 483 477 static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, ··· 486 492 ahc->platform_data->completeq_timer.expires = jiffies; 487 493 add_timer(&ahc->platform_data->completeq_timer); 488 494 } 489 - } 490 - 491 - /* 492 - * Must be called with our lock held. 
493 - */ 494 - static __inline void 495 - ahc_schedule_runq(struct ahc_softc *ahc) 496 - { 497 - tasklet_schedule(&ahc->platform_data->runq_tasklet); 498 495 } 499 496 500 497 static __inline struct ahc_linux_device* ··· 551 566 } 552 567 ahc_done_unlock(ahc, &done_flags); 553 568 return (acmd); 554 - } 555 - 556 - static __inline void 557 - ahc_linux_check_device_queue(struct ahc_softc *ahc, 558 - struct ahc_linux_device *dev) 559 - { 560 - if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) != 0 561 - && dev->active == 0) { 562 - dev->flags &= ~AHC_DEV_FREEZE_TIL_EMPTY; 563 - dev->qfrozen--; 564 - } 565 - 566 - if (TAILQ_FIRST(&dev->busyq) == NULL 567 - || dev->openings == 0 || dev->qfrozen != 0) 568 - return; 569 - 570 - ahc_linux_run_device_queue(ahc, dev); 571 - } 572 - 573 - static __inline struct ahc_linux_device * 574 - ahc_linux_next_device_to_run(struct ahc_softc *ahc) 575 - { 576 - 577 - if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0 578 - || (ahc->platform_data->qfrozen != 0)) 579 - return (NULL); 580 - return (TAILQ_FIRST(&ahc->platform_data->device_runq)); 581 - } 582 - 583 - static __inline void 584 - ahc_linux_run_device_queues(struct ahc_softc *ahc) 585 - { 586 - struct ahc_linux_device *dev; 587 - 588 - while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) { 589 - TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links); 590 - dev->flags &= ~AHC_DEV_ON_RUN_LIST; 591 - ahc_linux_check_device_queue(ahc, dev); 592 - } 593 569 } 594 570 595 571 static __inline void ··· 817 871 { 818 872 struct ahc_softc *ahc; 819 873 struct ahc_linux_device *dev; 820 - u_long flags; 821 874 822 875 ahc = *(struct ahc_softc **)cmd->device->host->hostdata; 823 876 ··· 825 880 */ 826 881 cmd->scsi_done = scsi_done; 827 882 828 - ahc_midlayer_entrypoint_lock(ahc, &flags); 829 - 830 883 /* 831 884 * Close the race of a command that was in the process of 832 885 * being queued to us just as our simq was frozen. 
Let 833 886 * DV commands through so long as we are only frozen to 834 887 * perform DV. 835 888 */ 836 - if (ahc->platform_data->qfrozen != 0) { 889 + if (ahc->platform_data->qfrozen != 0) 890 + return SCSI_MLQUEUE_HOST_BUSY; 837 891 838 - ahc_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ); 839 - ahc_linux_queue_cmd_complete(ahc, cmd); 840 - ahc_schedule_completeq(ahc); 841 - ahc_midlayer_entrypoint_unlock(ahc, &flags); 842 - return (0); 843 - } 844 892 dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id, 845 893 cmd->device->lun, /*alloc*/TRUE); 846 - if (dev == NULL) { 847 - ahc_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL); 848 - ahc_linux_queue_cmd_complete(ahc, cmd); 849 - ahc_schedule_completeq(ahc); 850 - ahc_midlayer_entrypoint_unlock(ahc, &flags); 851 - printf("%s: aic7xxx_linux_queue - Unable to allocate device!\n", 852 - ahc_name(ahc)); 853 - return (0); 854 - } 894 + BUG_ON(dev == NULL); 895 + 855 896 cmd->result = CAM_REQ_INPROG << 16; 856 - TAILQ_INSERT_TAIL(&dev->busyq, (struct ahc_cmd *)cmd, acmd_links.tqe); 857 - if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) { 858 - TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links); 859 - dev->flags |= AHC_DEV_ON_RUN_LIST; 860 - ahc_linux_run_device_queues(ahc); 861 - } 862 - ahc_midlayer_entrypoint_unlock(ahc, &flags); 863 - return (0); 897 + 898 + return ahc_linux_run_command(ahc, dev, cmd); 864 899 } 865 900 866 901 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ··· 912 987 if (dev != NULL 913 988 && (dev->flags & AHC_DEV_SLAVE_CONFIGURED) != 0) { 914 989 dev->flags |= AHC_DEV_UNCONFIGURED; 915 - if (TAILQ_EMPTY(&dev->busyq) 916 - && dev->active == 0 990 + if (dev->active == 0 917 991 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0) 918 992 ahc_linux_free_device(ahc, dev); 919 993 } ··· 1129 1205 }; 1130 1206 1131 1207 /**************************** Tasklet Handler *********************************/ 1132 - 1133 - /* 1134 - * In 2.4.X and above, this routine is called from a 
tasklet, 1135 - * so we must re-acquire our lock prior to executing this code. 1136 - * In all prior kernels, ahc_schedule_runq() calls this routine 1137 - * directly and ahc_schedule_runq() is called with our lock held. 1138 - */ 1139 - static void 1140 - ahc_runq_tasklet(unsigned long data) 1141 - { 1142 - struct ahc_softc* ahc; 1143 - struct ahc_linux_device *dev; 1144 - u_long flags; 1145 - 1146 - ahc = (struct ahc_softc *)data; 1147 - ahc_lock(ahc, &flags); 1148 - while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) { 1149 - 1150 - TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links); 1151 - dev->flags &= ~AHC_DEV_ON_RUN_LIST; 1152 - ahc_linux_check_device_queue(ahc, dev); 1153 - /* Yeild to our interrupt handler */ 1154 - ahc_unlock(ahc, &flags); 1155 - ahc_lock(ahc, &flags); 1156 - } 1157 - ahc_unlock(ahc, &flags); 1158 - } 1159 1208 1160 1209 /******************************** Macros **************************************/ 1161 1210 #define BUILD_SCSIID(ahc, cmd) \ ··· 1625 1728 ahc->platform_data->completeq_timer.function = 1626 1729 (ahc_linux_callback_t *)ahc_linux_thread_run_complete_queue; 1627 1730 init_MUTEX_LOCKED(&ahc->platform_data->eh_sem); 1628 - tasklet_init(&ahc->platform_data->runq_tasklet, ahc_runq_tasklet, 1629 - (unsigned long)ahc); 1630 1731 ahc->seltime = (aic7xxx_seltime & 0x3) << 4; 1631 1732 ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4; 1632 1733 if (aic7xxx_pci_parity == 0) ··· 1642 1747 1643 1748 if (ahc->platform_data != NULL) { 1644 1749 del_timer_sync(&ahc->platform_data->completeq_timer); 1645 - tasklet_kill(&ahc->platform_data->runq_tasklet); 1646 1750 if (ahc->platform_data->host != NULL) { 1647 1751 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 1648 1752 scsi_remove_host(ahc->platform_data->host); ··· 1800 1906 ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel, 1801 1907 int lun, u_int tag, role_t role, uint32_t status) 1802 1908 { 1803 - int chan; 1804 - int maxchan; 1805 - int targ; 1806 - 
int maxtarg; 1807 - int clun; 1808 - int maxlun; 1809 - int count; 1810 - 1811 - if (tag != SCB_LIST_NULL) 1812 - return (0); 1813 - 1814 - chan = 0; 1815 - if (channel != ALL_CHANNELS) { 1816 - chan = channel - 'A'; 1817 - maxchan = chan + 1; 1818 - } else { 1819 - maxchan = (ahc->features & AHC_TWIN) ? 2 : 1; 1820 - } 1821 - targ = 0; 1822 - if (target != CAM_TARGET_WILDCARD) { 1823 - targ = target; 1824 - maxtarg = targ + 1; 1825 - } else { 1826 - maxtarg = (ahc->features & AHC_WIDE) ? 16 : 8; 1827 - } 1828 - clun = 0; 1829 - if (lun != CAM_LUN_WILDCARD) { 1830 - clun = lun; 1831 - maxlun = clun + 1; 1832 - } else { 1833 - maxlun = AHC_NUM_LUNS; 1834 - } 1835 - 1836 - count = 0; 1837 - for (; chan < maxchan; chan++) { 1838 - 1839 - for (; targ < maxtarg; targ++) { 1840 - 1841 - for (; clun < maxlun; clun++) { 1842 - struct ahc_linux_device *dev; 1843 - struct ahc_busyq *busyq; 1844 - struct ahc_cmd *acmd; 1845 - 1846 - dev = ahc_linux_get_device(ahc, chan, 1847 - targ, clun, 1848 - /*alloc*/FALSE); 1849 - if (dev == NULL) 1850 - continue; 1851 - 1852 - busyq = &dev->busyq; 1853 - while ((acmd = TAILQ_FIRST(busyq)) != NULL) { 1854 - Scsi_Cmnd *cmd; 1855 - 1856 - cmd = &acmd_scsi_cmd(acmd); 1857 - TAILQ_REMOVE(busyq, acmd, 1858 - acmd_links.tqe); 1859 - count++; 1860 - cmd->result = status << 16; 1861 - ahc_linux_queue_cmd_complete(ahc, cmd); 1862 - } 1863 - } 1864 - } 1865 - } 1866 - 1867 - return (count); 1909 + return 0; 1868 1910 } 1869 1911 1870 1912 static void ··· 1875 2045 } 1876 2046 } 1877 2047 1878 - static void 1879 - ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev) 2048 + static int 2049 + ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev, 2050 + struct scsi_cmnd *cmd) 1880 2051 { 1881 - struct ahc_cmd *acmd; 1882 - struct scsi_cmnd *cmd; 1883 2052 struct scb *scb; 1884 2053 struct hardware_scb *hscb; 1885 2054 struct ahc_initiator_tinfo *tinfo; 1886 2055 struct ahc_tmode_tstate *tstate; 1887 2056 
uint16_t mask; 2057 + struct scb_tailq *untagged_q = NULL; 1888 2058 1889 - if ((dev->flags & AHC_DEV_ON_RUN_LIST) != 0) 1890 - panic("running device on run list"); 2059 + /* 2060 + * Schedule us to run later. The only reason we are not 2061 + * running is because the whole controller Q is frozen. 2062 + */ 2063 + if (ahc->platform_data->qfrozen != 0) 2064 + return SCSI_MLQUEUE_HOST_BUSY; 1891 2065 1892 - while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL 1893 - && dev->openings > 0 && dev->qfrozen == 0) { 2066 + /* 2067 + * We only allow one untagged transaction 2068 + * per target in the initiator role unless 2069 + * we are storing a full busy target *lun* 2070 + * table in SCB space. 2071 + */ 2072 + if (!blk_rq_tagged(cmd->request) 2073 + && (ahc->features & AHC_SCB_BTT) == 0) { 2074 + int target_offset; 1894 2075 1895 - /* 1896 - * Schedule us to run later. The only reason we are not 1897 - * running is because the whole controller Q is frozen. 1898 - */ 1899 - if (ahc->platform_data->qfrozen != 0) { 1900 - TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, 1901 - dev, links); 1902 - dev->flags |= AHC_DEV_ON_RUN_LIST; 1903 - return; 1904 - } 1905 - /* 1906 - * Get an scb to use. 1907 - */ 1908 - if ((scb = ahc_get_scb(ahc)) == NULL) { 1909 - TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, 1910 - dev, links); 1911 - dev->flags |= AHC_DEV_ON_RUN_LIST; 1912 - ahc->flags |= AHC_RESOURCE_SHORTAGE; 1913 - return; 1914 - } 1915 - TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe); 1916 - cmd = &acmd_scsi_cmd(acmd); 1917 - scb->io_ctx = cmd; 1918 - scb->platform_data->dev = dev; 1919 - hscb = scb->hscb; 1920 - cmd->host_scribble = (char *)scb; 1921 - 1922 - /* 1923 - * Fill out basics of the HSCB. 
1924 - */ 1925 - hscb->control = 0; 1926 - hscb->scsiid = BUILD_SCSIID(ahc, cmd); 1927 - hscb->lun = cmd->device->lun; 1928 - mask = SCB_GET_TARGET_MASK(ahc, scb); 1929 - tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb), 1930 - SCB_GET_OUR_ID(scb), 1931 - SCB_GET_TARGET(ahc, scb), &tstate); 1932 - hscb->scsirate = tinfo->scsirate; 1933 - hscb->scsioffset = tinfo->curr.offset; 1934 - if ((tstate->ultraenb & mask) != 0) 1935 - hscb->control |= ULTRAENB; 1936 - 1937 - if ((ahc->user_discenable & mask) != 0) 1938 - hscb->control |= DISCENB; 1939 - 1940 - if ((tstate->auto_negotiate & mask) != 0) { 1941 - scb->flags |= SCB_AUTO_NEGOTIATE; 1942 - scb->hscb->control |= MK_MESSAGE; 1943 - } 1944 - 1945 - if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) { 1946 - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 1947 - int msg_bytes; 1948 - uint8_t tag_msgs[2]; 1949 - 1950 - msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs); 1951 - if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) { 1952 - hscb->control |= tag_msgs[0]; 1953 - if (tag_msgs[0] == MSG_ORDERED_TASK) 1954 - dev->commands_since_idle_or_otag = 0; 1955 - } else 1956 - #endif 1957 - if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH 1958 - && (dev->flags & AHC_DEV_Q_TAGGED) != 0) { 1959 - hscb->control |= MSG_ORDERED_TASK; 1960 - dev->commands_since_idle_or_otag = 0; 1961 - } else { 1962 - hscb->control |= MSG_SIMPLE_TASK; 1963 - } 1964 - } 1965 - 1966 - hscb->cdb_len = cmd->cmd_len; 1967 - if (hscb->cdb_len <= 12) { 1968 - memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len); 1969 - } else { 1970 - memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len); 1971 - scb->flags |= SCB_CDB32_PTR; 1972 - } 1973 - 1974 - scb->platform_data->xfer_len = 0; 1975 - ahc_set_residual(scb, 0); 1976 - ahc_set_sense_residual(scb, 0); 1977 - scb->sg_count = 0; 1978 - if (cmd->use_sg != 0) { 1979 - struct ahc_dma_seg *sg; 1980 - struct scatterlist *cur_seg; 1981 - struct scatterlist *end_seg; 1982 - int nseg; 1983 - 1984 
- cur_seg = (struct scatterlist *)cmd->request_buffer; 1985 - nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg, 1986 - cmd->sc_data_direction); 1987 - end_seg = cur_seg + nseg; 1988 - /* Copy the segments into the SG list. */ 1989 - sg = scb->sg_list; 1990 - /* 1991 - * The sg_count may be larger than nseg if 1992 - * a transfer crosses a 32bit page. 1993 - */ 1994 - while (cur_seg < end_seg) { 1995 - dma_addr_t addr; 1996 - bus_size_t len; 1997 - int consumed; 1998 - 1999 - addr = sg_dma_address(cur_seg); 2000 - len = sg_dma_len(cur_seg); 2001 - consumed = ahc_linux_map_seg(ahc, scb, 2002 - sg, addr, len); 2003 - sg += consumed; 2004 - scb->sg_count += consumed; 2005 - cur_seg++; 2006 - } 2007 - sg--; 2008 - sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); 2009 - 2010 - /* 2011 - * Reset the sg list pointer. 2012 - */ 2013 - scb->hscb->sgptr = 2014 - ahc_htole32(scb->sg_list_phys | SG_FULL_RESID); 2015 - 2016 - /* 2017 - * Copy the first SG into the "current" 2018 - * data pointer area. 2019 - */ 2020 - scb->hscb->dataptr = scb->sg_list->addr; 2021 - scb->hscb->datacnt = scb->sg_list->len; 2022 - } else if (cmd->request_bufflen != 0) { 2023 - struct ahc_dma_seg *sg; 2024 - dma_addr_t addr; 2025 - 2026 - sg = scb->sg_list; 2027 - addr = pci_map_single(ahc->dev_softc, 2028 - cmd->request_buffer, 2029 - cmd->request_bufflen, 2030 - cmd->sc_data_direction); 2031 - scb->platform_data->buf_busaddr = addr; 2032 - scb->sg_count = ahc_linux_map_seg(ahc, scb, 2033 - sg, addr, 2034 - cmd->request_bufflen); 2035 - sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); 2036 - 2037 - /* 2038 - * Reset the sg list pointer. 2039 - */ 2040 - scb->hscb->sgptr = 2041 - ahc_htole32(scb->sg_list_phys | SG_FULL_RESID); 2042 - 2043 - /* 2044 - * Copy the first SG into the "current" 2045 - * data pointer area. 
2046 - */ 2047 - scb->hscb->dataptr = sg->addr; 2048 - scb->hscb->datacnt = sg->len; 2049 - } else { 2050 - scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL); 2051 - scb->hscb->dataptr = 0; 2052 - scb->hscb->datacnt = 0; 2053 - scb->sg_count = 0; 2054 - } 2055 - 2056 - ahc_sync_sglist(ahc, scb, BUS_DMASYNC_PREWRITE); 2057 - LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links); 2058 - dev->openings--; 2059 - dev->active++; 2060 - dev->commands_issued++; 2061 - if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0) 2062 - dev->commands_since_idle_or_otag++; 2063 - 2064 - /* 2065 - * We only allow one untagged transaction 2066 - * per target in the initiator role unless 2067 - * we are storing a full busy target *lun* 2068 - * table in SCB space. 2069 - */ 2070 - if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0 2071 - && (ahc->features & AHC_SCB_BTT) == 0) { 2072 - struct scb_tailq *untagged_q; 2073 - int target_offset; 2074 - 2075 - target_offset = SCB_GET_TARGET_OFFSET(ahc, scb); 2076 - untagged_q = &(ahc->untagged_queues[target_offset]); 2077 - TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe); 2078 - scb->flags |= SCB_UNTAGGEDQ; 2079 - if (TAILQ_FIRST(untagged_q) != scb) 2080 - continue; 2081 - } 2082 - scb->flags |= SCB_ACTIVE; 2083 - ahc_queue_scb(ahc, scb); 2076 + target_offset = cmd->device->id + cmd->device->channel * 8; 2077 + untagged_q = &(ahc->untagged_queues[target_offset]); 2078 + if (!TAILQ_EMPTY(untagged_q)) 2079 + /* if we're already executing an untagged command 2080 + * we're busy to another */ 2081 + return SCSI_MLQUEUE_DEVICE_BUSY; 2084 2082 } 2083 + 2084 + /* 2085 + * Get an scb to use. 2086 + */ 2087 + if ((scb = ahc_get_scb(ahc)) == NULL) { 2088 + ahc->flags |= AHC_RESOURCE_SHORTAGE; 2089 + return SCSI_MLQUEUE_HOST_BUSY; 2090 + } 2091 + 2092 + scb->io_ctx = cmd; 2093 + scb->platform_data->dev = dev; 2094 + hscb = scb->hscb; 2095 + cmd->host_scribble = (char *)scb; 2096 + 2097 + /* 2098 + * Fill out basics of the HSCB. 
2099 + */ 2100 + hscb->control = 0; 2101 + hscb->scsiid = BUILD_SCSIID(ahc, cmd); 2102 + hscb->lun = cmd->device->lun; 2103 + mask = SCB_GET_TARGET_MASK(ahc, scb); 2104 + tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb), 2105 + SCB_GET_OUR_ID(scb), 2106 + SCB_GET_TARGET(ahc, scb), &tstate); 2107 + hscb->scsirate = tinfo->scsirate; 2108 + hscb->scsioffset = tinfo->curr.offset; 2109 + if ((tstate->ultraenb & mask) != 0) 2110 + hscb->control |= ULTRAENB; 2111 + 2112 + if ((ahc->user_discenable & mask) != 0) 2113 + hscb->control |= DISCENB; 2114 + 2115 + if ((tstate->auto_negotiate & mask) != 0) { 2116 + scb->flags |= SCB_AUTO_NEGOTIATE; 2117 + scb->hscb->control |= MK_MESSAGE; 2118 + } 2119 + 2120 + if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) { 2121 + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 2122 + int msg_bytes; 2123 + uint8_t tag_msgs[2]; 2124 + 2125 + msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs); 2126 + if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) { 2127 + hscb->control |= tag_msgs[0]; 2128 + if (tag_msgs[0] == MSG_ORDERED_TASK) 2129 + dev->commands_since_idle_or_otag = 0; 2130 + } else 2131 + #endif 2132 + if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH 2133 + && (dev->flags & AHC_DEV_Q_TAGGED) != 0) { 2134 + hscb->control |= MSG_ORDERED_TASK; 2135 + dev->commands_since_idle_or_otag = 0; 2136 + } else { 2137 + hscb->control |= MSG_SIMPLE_TASK; 2138 + } 2139 + } 2140 + 2141 + hscb->cdb_len = cmd->cmd_len; 2142 + if (hscb->cdb_len <= 12) { 2143 + memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len); 2144 + } else { 2145 + memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len); 2146 + scb->flags |= SCB_CDB32_PTR; 2147 + } 2148 + 2149 + scb->platform_data->xfer_len = 0; 2150 + ahc_set_residual(scb, 0); 2151 + ahc_set_sense_residual(scb, 0); 2152 + scb->sg_count = 0; 2153 + if (cmd->use_sg != 0) { 2154 + struct ahc_dma_seg *sg; 2155 + struct scatterlist *cur_seg; 2156 + struct scatterlist *end_seg; 2157 + int nseg; 2158 + 2159 
+ cur_seg = (struct scatterlist *)cmd->request_buffer; 2160 + nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg, 2161 + cmd->sc_data_direction); 2162 + end_seg = cur_seg + nseg; 2163 + /* Copy the segments into the SG list. */ 2164 + sg = scb->sg_list; 2165 + /* 2166 + * The sg_count may be larger than nseg if 2167 + * a transfer crosses a 32bit page. 2168 + */ 2169 + while (cur_seg < end_seg) { 2170 + dma_addr_t addr; 2171 + bus_size_t len; 2172 + int consumed; 2173 + 2174 + addr = sg_dma_address(cur_seg); 2175 + len = sg_dma_len(cur_seg); 2176 + consumed = ahc_linux_map_seg(ahc, scb, 2177 + sg, addr, len); 2178 + sg += consumed; 2179 + scb->sg_count += consumed; 2180 + cur_seg++; 2181 + } 2182 + sg--; 2183 + sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); 2184 + 2185 + /* 2186 + * Reset the sg list pointer. 2187 + */ 2188 + scb->hscb->sgptr = 2189 + ahc_htole32(scb->sg_list_phys | SG_FULL_RESID); 2190 + 2191 + /* 2192 + * Copy the first SG into the "current" 2193 + * data pointer area. 2194 + */ 2195 + scb->hscb->dataptr = scb->sg_list->addr; 2196 + scb->hscb->datacnt = scb->sg_list->len; 2197 + } else if (cmd->request_bufflen != 0) { 2198 + struct ahc_dma_seg *sg; 2199 + dma_addr_t addr; 2200 + 2201 + sg = scb->sg_list; 2202 + addr = pci_map_single(ahc->dev_softc, 2203 + cmd->request_buffer, 2204 + cmd->request_bufflen, 2205 + cmd->sc_data_direction); 2206 + scb->platform_data->buf_busaddr = addr; 2207 + scb->sg_count = ahc_linux_map_seg(ahc, scb, 2208 + sg, addr, 2209 + cmd->request_bufflen); 2210 + sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); 2211 + 2212 + /* 2213 + * Reset the sg list pointer. 2214 + */ 2215 + scb->hscb->sgptr = 2216 + ahc_htole32(scb->sg_list_phys | SG_FULL_RESID); 2217 + 2218 + /* 2219 + * Copy the first SG into the "current" 2220 + * data pointer area. 
2221 + */ 2222 + scb->hscb->dataptr = sg->addr; 2223 + scb->hscb->datacnt = sg->len; 2224 + } else { 2225 + scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL); 2226 + scb->hscb->dataptr = 0; 2227 + scb->hscb->datacnt = 0; 2228 + scb->sg_count = 0; 2229 + } 2230 + 2231 + LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links); 2232 + dev->openings--; 2233 + dev->active++; 2234 + dev->commands_issued++; 2235 + if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0) 2236 + dev->commands_since_idle_or_otag++; 2237 + 2238 + scb->flags |= SCB_ACTIVE; 2239 + if (untagged_q) { 2240 + TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe); 2241 + scb->flags |= SCB_UNTAGGEDQ; 2242 + } 2243 + ahc_queue_scb(ahc, scb); 2244 + return 0; 2085 2245 } 2086 2246 2087 2247 /* ··· 2087 2267 ahc = (struct ahc_softc *) dev_id; 2088 2268 ahc_lock(ahc, &flags); 2089 2269 ours = ahc_intr(ahc); 2090 - if (ahc_linux_next_device_to_run(ahc) != NULL) 2091 - ahc_schedule_runq(ahc); 2092 2270 ahc_linux_run_complete_queue(ahc); 2093 2271 ahc_unlock(ahc, &flags); 2094 2272 return IRQ_RETVAL(ours); ··· 2167 2349 return (NULL); 2168 2350 memset(dev, 0, sizeof(*dev)); 2169 2351 init_timer(&dev->timer); 2170 - TAILQ_INIT(&dev->busyq); 2171 2352 dev->flags = AHC_DEV_UNCONFIGURED; 2172 2353 dev->lun = lun; 2173 2354 dev->target = targ; ··· 2332 2515 target_offset = SCB_GET_TARGET_OFFSET(ahc, scb); 2333 2516 untagged_q = &(ahc->untagged_queues[target_offset]); 2334 2517 TAILQ_REMOVE(untagged_q, scb, links.tqe); 2335 - ahc_run_untagged_queue(ahc, untagged_q); 2518 + BUG_ON(!TAILQ_EMPTY(untagged_q)); 2336 2519 } 2337 2520 2338 2521 if ((scb->flags & SCB_ACTIVE) == 0) { ··· 2423 2606 if (dev->active == 0) 2424 2607 dev->commands_since_idle_or_otag = 0; 2425 2608 2426 - if (TAILQ_EMPTY(&dev->busyq)) { 2427 - if ((dev->flags & AHC_DEV_UNCONFIGURED) != 0 2428 - && dev->active == 0 2429 - && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0) 2430 - ahc_linux_free_device(ahc, dev); 2431 - } else if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) { 
2609 + if ((dev->flags & AHC_DEV_UNCONFIGURED) != 0 2610 + && dev->active == 0 2611 + && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0) 2612 + ahc_linux_free_device(ahc, dev); 2613 + else if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) { 2432 2614 TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links); 2433 2615 dev->flags |= AHC_DEV_ON_RUN_LIST; 2434 2616 } ··· 2756 2940 ahc->platform_data->qfrozen--; 2757 2941 if (ahc->platform_data->qfrozen == 0) 2758 2942 unblock_reqs = 1; 2759 - ahc_schedule_runq(ahc); 2760 2943 ahc_unlock(ahc, &s); 2761 2944 /* 2762 2945 * There is still a race here. The mid-layer ··· 2780 2965 dev->flags &= ~AHC_DEV_TIMER_ACTIVE; 2781 2966 if (dev->qfrozen > 0) 2782 2967 dev->qfrozen--; 2783 - if (dev->qfrozen == 0 2784 - && (dev->flags & AHC_DEV_ON_RUN_LIST) == 0) 2785 - ahc_linux_run_device_queue(ahc, dev); 2786 - if (TAILQ_EMPTY(&dev->busyq) 2787 - && dev->active == 0) 2968 + if (dev->active == 0) 2788 2969 __ahc_linux_free_device(ahc, dev); 2789 2970 ahc_unlock(ahc, &s); 2790 2971 } ··· 2789 2978 ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag) 2790 2979 { 2791 2980 struct ahc_softc *ahc; 2792 - struct ahc_cmd *acmd; 2793 - struct ahc_cmd *list_acmd; 2794 2981 struct ahc_linux_device *dev; 2795 2982 struct scb *pending_scb; 2796 2983 u_long s; ··· 2807 2998 paused = FALSE; 2808 2999 wait = FALSE; 2809 3000 ahc = *(struct ahc_softc **)cmd->device->host->hostdata; 2810 - acmd = (struct ahc_cmd *)cmd; 2811 3001 2812 3002 printf("%s:%d:%d:%d: Attempting to queue a%s message\n", 2813 3003 ahc_name(ahc), cmd->device->channel, ··· 2854 3046 cmd->device->lun); 2855 3047 retval = SUCCESS; 2856 3048 goto no_cmd; 2857 - } 2858 - 2859 - TAILQ_FOREACH(list_acmd, &dev->busyq, acmd_links.tqe) { 2860 - if (list_acmd == acmd) 2861 - break; 2862 - } 2863 - 2864 - if (list_acmd != NULL) { 2865 - printf("%s:%d:%d:%d: Command found on device queue\n", 2866 - ahc_name(ahc), cmd->device->channel, cmd->device->id, 2867 - cmd->device->lun); 2868 - if 
(flag == SCB_ABORT) { 2869 - TAILQ_REMOVE(&dev->busyq, list_acmd, acmd_links.tqe); 2870 - cmd->result = DID_ABORT << 16; 2871 - ahc_linux_queue_cmd_complete(ahc, cmd); 2872 - retval = SUCCESS; 2873 - goto done; 2874 - } 2875 3049 } 2876 3050 2877 3051 if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0 ··· 3089 3299 } 3090 3300 spin_lock_irq(&ahc->platform_data->spin_lock); 3091 3301 } 3092 - ahc_schedule_runq(ahc); 3093 3302 ahc_linux_run_complete_queue(ahc); 3094 3303 ahc_midlayer_entrypoint_unlock(ahc, &s); 3095 3304 return (retval); ··· 3097 3308 void 3098 3309 ahc_platform_dump_card_state(struct ahc_softc *ahc) 3099 3310 { 3100 - struct ahc_linux_device *dev; 3101 - int channel; 3102 - int maxchannel; 3103 - int target; 3104 - int maxtarget; 3105 - int lun; 3106 - int i; 3107 - 3108 - maxchannel = (ahc->features & AHC_TWIN) ? 1 : 0; 3109 - maxtarget = (ahc->features & AHC_WIDE) ? 15 : 7; 3110 - for (channel = 0; channel <= maxchannel; channel++) { 3111 - 3112 - for (target = 0; target <=maxtarget; target++) { 3113 - 3114 - for (lun = 0; lun < AHC_NUM_LUNS; lun++) { 3115 - struct ahc_cmd *acmd; 3116 - 3117 - dev = ahc_linux_get_device(ahc, channel, target, 3118 - lun, /*alloc*/FALSE); 3119 - if (dev == NULL) 3120 - continue; 3121 - 3122 - printf("DevQ(%d:%d:%d): ", 3123 - channel, target, lun); 3124 - i = 0; 3125 - TAILQ_FOREACH(acmd, &dev->busyq, 3126 - acmd_links.tqe) { 3127 - if (i++ > AHC_SCB_MAX) 3128 - break; 3129 - } 3130 - printf("%d waiting\n", i); 3131 - } 3132 - } 3133 - } 3134 3311 } 3135 3312 3136 3313 static void ahc_linux_exit(void);
+1 -3
drivers/scsi/aic7xxx/aic7xxx_osm.h
··· 66 66 #include <linux/pci.h> 67 67 #include <linux/smp_lock.h> 68 68 #include <linux/version.h> 69 + #include <linux/interrupt.h> 69 70 #include <linux/module.h> 70 71 #include <asm/byteorder.h> 71 72 #include <asm/io.h> 72 73 73 - #include <linux/interrupt.h> /* For tasklet support. */ 74 74 #include <linux/config.h> 75 75 #include <linux/slab.h> 76 76 ··· 341 341 struct ahc_linux_target; 342 342 struct ahc_linux_device { 343 343 TAILQ_ENTRY(ahc_linux_device) links; 344 - struct ahc_busyq busyq; 345 344 346 345 /* 347 346 * The number of transactions currently ··· 487 488 struct ahc_completeq completeq; 488 489 489 490 spinlock_t spin_lock; 490 - struct tasklet_struct runq_tasklet; 491 491 u_int qfrozen; 492 492 pid_t dv_pid; 493 493 struct timer_list completeq_timer;