Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

atm: [nicstar] reformatted with Lindent

Signed-off-by: Chas Williams - CONTRACTOR <chas@cmf.nrl.navy.mil>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

chas williams - CONTRACTOR and committed by
David S. Miller
098fde11 741a00be

+2706 -2987
+2293 -2478
drivers/atm/nicstar.c
··· 1 - /****************************************************************************** 2 - * 1 + /* 3 2 * nicstar.c 4 3 * 5 4 * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards. ··· 15 16 * 16 17 * 17 18 * (C) INESC 1999 18 - * 19 - * 20 - ******************************************************************************/ 19 + */ 21 20 22 - 23 - /**** IMPORTANT INFORMATION *************************************************** 21 + /* 22 + * IMPORTANT INFORMATION 24 23 * 25 24 * There are currently three types of spinlocks: 26 25 * ··· 28 31 * 29 32 * These must NEVER be grabbed in reverse order. 30 33 * 31 - ******************************************************************************/ 34 + */ 32 35 33 - /* Header files ***************************************************************/ 36 + /* Header files */ 34 37 35 38 #include <linux/module.h> 36 39 #include <linux/kernel.h> ··· 59 62 #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ 60 63 61 64 #if BITS_PER_LONG != 32 62 - # error FIXME: this driver requires a 32-bit platform 65 + # error FIXME: this driver requires a 32-bit platform 63 66 #endif 64 67 65 - /* Additional code ************************************************************/ 68 + /* Additional code */ 66 69 67 70 #include "nicstarmac.c" 68 71 69 - 70 - /* Configurable parameters ****************************************************/ 72 + /* Configurable parameters */ 71 73 72 74 #undef PHY_LOOPBACK 73 75 #undef TX_DEBUG ··· 74 78 #undef GENERAL_DEBUG 75 79 #undef EXTRA_DEBUG 76 80 77 - #undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know 78 - you're going to use only raw ATM */ 81 + #undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know 82 + you're going to use only raw ATM */ 79 83 80 - 81 - /* Do not touch these *********************************************************/ 84 + /* Do not touch these */ 82 85 83 86 #ifdef TX_DEBUG 84 87 #define TXPRINTK(args...) 
printk(args) ··· 103 108 #define XPRINTK(args...) 104 109 #endif /* EXTRA_DEBUG */ 105 110 106 - 107 - /* Macros *********************************************************************/ 111 + /* Macros */ 108 112 109 113 #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ) 110 114 ··· 120 126 #define ATM_SKB(s) (&(s)->atm) 121 127 #endif 122 128 129 + /* Function declarations */ 123 130 124 - /* Function declarations ******************************************************/ 125 - 126 - static u32 ns_read_sram(ns_dev *card, u32 sram_address); 127 - static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count); 131 + static u32 ns_read_sram(ns_dev * card, u32 sram_address); 132 + static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, 133 + int count); 128 134 static int __devinit ns_init_card(int i, struct pci_dev *pcidev); 129 - static void __devinit ns_init_card_error(ns_dev *card, int error); 135 + static void __devinit ns_init_card_error(ns_dev * card, int error); 130 136 static scq_info *get_scq(int size, u32 scd); 131 - static void free_scq(scq_info *scq, struct atm_vcc *vcc); 137 + static void free_scq(scq_info * scq, struct atm_vcc *vcc); 132 138 static void push_rxbufs(ns_dev *, struct sk_buff *); 133 139 static irqreturn_t ns_irq_handler(int irq, void *dev_id); 134 140 static int ns_open(struct atm_vcc *vcc); 135 141 static void ns_close(struct atm_vcc *vcc); 136 - static void fill_tst(ns_dev *card, int n, vc_map *vc); 142 + static void fill_tst(ns_dev * card, int n, vc_map * vc); 137 143 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb); 138 - static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd, 139 - struct sk_buff *skb); 140 - static void process_tsq(ns_dev *card); 141 - static void drain_scq(ns_dev *card, scq_info *scq, int pos); 142 - static void process_rsq(ns_dev *card); 143 - static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe); 144 + static int push_scqe(ns_dev * 
card, vc_map * vc, scq_info * scq, ns_scqe * tbd, 145 + struct sk_buff *skb); 146 + static void process_tsq(ns_dev * card); 147 + static void drain_scq(ns_dev * card, scq_info * scq, int pos); 148 + static void process_rsq(ns_dev * card); 149 + static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe); 144 150 #ifdef NS_USE_DESTRUCTORS 145 151 static void ns_sb_destructor(struct sk_buff *sb); 146 152 static void ns_lb_destructor(struct sk_buff *lb); 147 153 static void ns_hb_destructor(struct sk_buff *hb); 148 154 #endif /* NS_USE_DESTRUCTORS */ 149 - static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb); 150 - static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count); 151 - static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb); 152 - static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb); 153 - static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb); 154 - static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page); 155 - static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg); 156 - static void which_list(ns_dev *card, struct sk_buff *skb); 155 + static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb); 156 + static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count); 157 + static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb); 158 + static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb); 159 + static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb); 160 + static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page); 161 + static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg); 162 + static void which_list(ns_dev * card, struct sk_buff *skb); 157 163 static void ns_poll(unsigned long arg); 158 164 static int ns_parse_mac(char *mac, unsigned char *esi); 159 165 static short ns_h2i(char c); 160 166 static void ns_phy_put(struct atm_dev *dev, unsigned char value, 161 - unsigned long addr); 167 + unsigned long 
addr); 162 168 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr); 163 169 164 - 165 - 166 - /* Global variables ***********************************************************/ 170 + /* Global variables */ 167 171 168 172 static struct ns_dev *cards[NS_MAX_CARDS]; 169 173 static unsigned num_cards; 170 - static struct atmdev_ops atm_ops = 171 - { 172 - .open = ns_open, 173 - .close = ns_close, 174 - .ioctl = ns_ioctl, 175 - .send = ns_send, 176 - .phy_put = ns_phy_put, 177 - .phy_get = ns_phy_get, 178 - .proc_read = ns_proc_read, 179 - .owner = THIS_MODULE, 174 + static struct atmdev_ops atm_ops = { 175 + .open = ns_open, 176 + .close = ns_close, 177 + .ioctl = ns_ioctl, 178 + .send = ns_send, 179 + .phy_put = ns_phy_put, 180 + .phy_get = ns_phy_get, 181 + .proc_read = ns_proc_read, 182 + .owner = THIS_MODULE, 180 183 }; 184 + 181 185 static struct timer_list ns_timer; 182 186 static char *mac[NS_MAX_CARDS]; 183 187 module_param_array(mac, charp, NULL, 0); 184 188 MODULE_LICENSE("GPL"); 185 189 186 - 187 - /* Functions*******************************************************************/ 190 + /* Functions */ 188 191 189 192 static int __devinit nicstar_init_one(struct pci_dev *pcidev, 190 193 const struct pci_device_id *ent) 191 194 { 192 - static int index = -1; 193 - unsigned int error; 195 + static int index = -1; 196 + unsigned int error; 194 197 195 - index++; 196 - cards[index] = NULL; 198 + index++; 199 + cards[index] = NULL; 197 200 198 - error = ns_init_card(index, pcidev); 199 - if (error) { 200 - cards[index--] = NULL; /* don't increment index */ 201 - goto err_out; 202 - } 201 + error = ns_init_card(index, pcidev); 202 + if (error) { 203 + cards[index--] = NULL; /* don't increment index */ 204 + goto err_out; 205 + } 203 206 204 - return 0; 207 + return 0; 205 208 err_out: 206 - return -ENODEV; 209 + return -ENODEV; 207 210 } 208 - 209 - 210 211 211 212 static void __devexit nicstar_remove_one(struct pci_dev *pcidev) 212 213 { 213 - 
int i, j; 214 - ns_dev *card = pci_get_drvdata(pcidev); 215 - struct sk_buff *hb; 216 - struct sk_buff *iovb; 217 - struct sk_buff *lb; 218 - struct sk_buff *sb; 219 - 220 - i = card->index; 214 + int i, j; 215 + ns_dev *card = pci_get_drvdata(pcidev); 216 + struct sk_buff *hb; 217 + struct sk_buff *iovb; 218 + struct sk_buff *lb; 219 + struct sk_buff *sb; 221 220 222 - if (cards[i] == NULL) 223 - return; 221 + i = card->index; 224 222 225 - if (card->atmdev->phy && card->atmdev->phy->stop) 226 - card->atmdev->phy->stop(card->atmdev); 223 + if (cards[i] == NULL) 224 + return; 227 225 228 - /* Stop everything */ 229 - writel(0x00000000, card->membase + CFG); 226 + if (card->atmdev->phy && card->atmdev->phy->stop) 227 + card->atmdev->phy->stop(card->atmdev); 230 228 231 - /* De-register device */ 232 - atm_dev_deregister(card->atmdev); 229 + /* Stop everything */ 230 + writel(0x00000000, card->membase + CFG); 233 231 234 - /* Disable PCI device */ 235 - pci_disable_device(pcidev); 236 - 237 - /* Free up resources */ 238 - j = 0; 239 - PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count); 240 - while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) 241 - { 242 - dev_kfree_skb_any(hb); 243 - j++; 244 - } 245 - PRINTK("nicstar%d: %d huge buffers freed.\n", i, j); 246 - j = 0; 247 - PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count); 248 - while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) 249 - { 250 - dev_kfree_skb_any(iovb); 251 - j++; 252 - } 253 - PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j); 254 - while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) 255 - dev_kfree_skb_any(lb); 256 - while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) 257 - dev_kfree_skb_any(sb); 258 - free_scq(card->scq0, NULL); 259 - for (j = 0; j < NS_FRSCD_NUM; j++) 260 - { 261 - if (card->scd2vc[j] != NULL) 262 - free_scq(card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); 263 - } 264 - kfree(card->rsq.org); 265 - 
kfree(card->tsq.org); 266 - free_irq(card->pcidev->irq, card); 267 - iounmap(card->membase); 268 - kfree(card); 232 + /* De-register device */ 233 + atm_dev_deregister(card->atmdev); 234 + 235 + /* Disable PCI device */ 236 + pci_disable_device(pcidev); 237 + 238 + /* Free up resources */ 239 + j = 0; 240 + PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count); 241 + while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) { 242 + dev_kfree_skb_any(hb); 243 + j++; 244 + } 245 + PRINTK("nicstar%d: %d huge buffers freed.\n", i, j); 246 + j = 0; 247 + PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, 248 + card->iovpool.count); 249 + while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) { 250 + dev_kfree_skb_any(iovb); 251 + j++; 252 + } 253 + PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j); 254 + while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) 255 + dev_kfree_skb_any(lb); 256 + while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) 257 + dev_kfree_skb_any(sb); 258 + free_scq(card->scq0, NULL); 259 + for (j = 0; j < NS_FRSCD_NUM; j++) { 260 + if (card->scd2vc[j] != NULL) 261 + free_scq(card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); 262 + } 263 + kfree(card->rsq.org); 264 + kfree(card->tsq.org); 265 + free_irq(card->pcidev->irq, card); 266 + iounmap(card->membase); 267 + kfree(card); 269 268 } 270 269 271 - 272 - 273 - static struct pci_device_id nicstar_pci_tbl[] __devinitdata = 274 - { 270 + static struct pci_device_id nicstar_pci_tbl[] __devinitdata = { 275 271 {PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_IDT_IDT77201, 276 272 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 277 273 {0,} /* terminate list */ 278 274 }; 275 + 279 276 MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl); 280 277 281 - 282 - 283 278 static struct pci_driver nicstar_driver = { 284 - .name = "nicstar", 285 - .id_table = nicstar_pci_tbl, 286 - .probe = nicstar_init_one, 287 - .remove = __devexit_p(nicstar_remove_one), 279 + .name = "nicstar", 280 + .id_table = nicstar_pci_tbl, 281 + 
.probe = nicstar_init_one, 282 + .remove = __devexit_p(nicstar_remove_one), 288 283 }; 289 - 290 - 291 284 292 285 static int __init nicstar_init(void) 293 286 { 294 - unsigned error = 0; /* Initialized to remove compile warning */ 287 + unsigned error = 0; /* Initialized to remove compile warning */ 295 288 296 - XPRINTK("nicstar: nicstar_init() called.\n"); 289 + XPRINTK("nicstar: nicstar_init() called.\n"); 297 290 298 - error = pci_register_driver(&nicstar_driver); 299 - 300 - TXPRINTK("nicstar: TX debug enabled.\n"); 301 - RXPRINTK("nicstar: RX debug enabled.\n"); 302 - PRINTK("nicstar: General debug enabled.\n"); 291 + error = pci_register_driver(&nicstar_driver); 292 + 293 + TXPRINTK("nicstar: TX debug enabled.\n"); 294 + RXPRINTK("nicstar: RX debug enabled.\n"); 295 + PRINTK("nicstar: General debug enabled.\n"); 303 296 #ifdef PHY_LOOPBACK 304 - printk("nicstar: using PHY loopback.\n"); 297 + printk("nicstar: using PHY loopback.\n"); 305 298 #endif /* PHY_LOOPBACK */ 306 - XPRINTK("nicstar: nicstar_init() returned.\n"); 299 + XPRINTK("nicstar: nicstar_init() returned.\n"); 307 300 308 - if (!error) { 309 - init_timer(&ns_timer); 310 - ns_timer.expires = jiffies + NS_POLL_PERIOD; 311 - ns_timer.data = 0UL; 312 - ns_timer.function = ns_poll; 313 - add_timer(&ns_timer); 314 - } 315 - 316 - return error; 301 + if (!error) { 302 + init_timer(&ns_timer); 303 + ns_timer.expires = jiffies + NS_POLL_PERIOD; 304 + ns_timer.data = 0UL; 305 + ns_timer.function = ns_poll; 306 + add_timer(&ns_timer); 307 + } 308 + 309 + return error; 317 310 } 318 - 319 - 320 311 321 312 static void __exit nicstar_cleanup(void) 322 313 { 323 - XPRINTK("nicstar: nicstar_cleanup() called.\n"); 314 + XPRINTK("nicstar: nicstar_cleanup() called.\n"); 324 315 325 - del_timer(&ns_timer); 316 + del_timer(&ns_timer); 326 317 327 - pci_unregister_driver(&nicstar_driver); 318 + pci_unregister_driver(&nicstar_driver); 328 319 329 - XPRINTK("nicstar: nicstar_cleanup() returned.\n"); 320 + 
XPRINTK("nicstar: nicstar_cleanup() returned.\n"); 330 321 } 331 322 332 - 333 - 334 - static u32 ns_read_sram(ns_dev *card, u32 sram_address) 323 + static u32 ns_read_sram(ns_dev * card, u32 sram_address) 335 324 { 336 - unsigned long flags; 337 - u32 data; 338 - sram_address <<= 2; 339 - sram_address &= 0x0007FFFC; /* address must be dword aligned */ 340 - sram_address |= 0x50000000; /* SRAM read command */ 341 - spin_lock_irqsave(&card->res_lock, flags); 342 - while (CMD_BUSY(card)); 343 - writel(sram_address, card->membase + CMD); 344 - while (CMD_BUSY(card)); 345 - data = readl(card->membase + DR0); 346 - spin_unlock_irqrestore(&card->res_lock, flags); 347 - return data; 325 + unsigned long flags; 326 + u32 data; 327 + sram_address <<= 2; 328 + sram_address &= 0x0007FFFC; /* address must be dword aligned */ 329 + sram_address |= 0x50000000; /* SRAM read command */ 330 + spin_lock_irqsave(&card->res_lock, flags); 331 + while (CMD_BUSY(card)) ; 332 + writel(sram_address, card->membase + CMD); 333 + while (CMD_BUSY(card)) ; 334 + data = readl(card->membase + DR0); 335 + spin_unlock_irqrestore(&card->res_lock, flags); 336 + return data; 348 337 } 349 338 350 - 351 - 352 - static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count) 339 + static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, 340 + int count) 353 341 { 354 - unsigned long flags; 355 - int i, c; 356 - count--; /* count range now is 0..3 instead of 1..4 */ 357 - c = count; 358 - c <<= 2; /* to use increments of 4 */ 359 - spin_lock_irqsave(&card->res_lock, flags); 360 - while (CMD_BUSY(card)); 361 - for (i = 0; i <= c; i += 4) 362 - writel(*(value++), card->membase + i); 363 - /* Note: DR# registers are the first 4 dwords in nicstar's memspace, 364 - so card->membase + DR0 == card->membase */ 365 - sram_address <<= 2; 366 - sram_address &= 0x0007FFFC; 367 - sram_address |= (0x40000000 | count); 368 - writel(sram_address, card->membase + CMD); 369 - 
spin_unlock_irqrestore(&card->res_lock, flags); 342 + unsigned long flags; 343 + int i, c; 344 + count--; /* count range now is 0..3 instead of 1..4 */ 345 + c = count; 346 + c <<= 2; /* to use increments of 4 */ 347 + spin_lock_irqsave(&card->res_lock, flags); 348 + while (CMD_BUSY(card)) ; 349 + for (i = 0; i <= c; i += 4) 350 + writel(*(value++), card->membase + i); 351 + /* Note: DR# registers are the first 4 dwords in nicstar's memspace, 352 + so card->membase + DR0 == card->membase */ 353 + sram_address <<= 2; 354 + sram_address &= 0x0007FFFC; 355 + sram_address |= (0x40000000 | count); 356 + writel(sram_address, card->membase + CMD); 357 + spin_unlock_irqrestore(&card->res_lock, flags); 370 358 } 371 - 372 359 373 360 static int __devinit ns_init_card(int i, struct pci_dev *pcidev) 374 361 { 375 - int j; 376 - struct ns_dev *card = NULL; 377 - unsigned char pci_latency; 378 - unsigned error; 379 - u32 data; 380 - u32 u32d[4]; 381 - u32 ns_cfg_rctsize; 382 - int bcount; 383 - unsigned long membase; 362 + int j; 363 + struct ns_dev *card = NULL; 364 + unsigned char pci_latency; 365 + unsigned error; 366 + u32 data; 367 + u32 u32d[4]; 368 + u32 ns_cfg_rctsize; 369 + int bcount; 370 + unsigned long membase; 384 371 385 - error = 0; 372 + error = 0; 386 373 387 - if (pci_enable_device(pcidev)) 388 - { 389 - printk("nicstar%d: can't enable PCI device\n", i); 390 - error = 2; 391 - ns_init_card_error(card, error); 392 - return error; 393 - } 374 + if (pci_enable_device(pcidev)) { 375 + printk("nicstar%d: can't enable PCI device\n", i); 376 + error = 2; 377 + ns_init_card_error(card, error); 378 + return error; 379 + } 394 380 395 - if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) 396 - { 397 - printk("nicstar%d: can't allocate memory for device structure.\n", i); 398 - error = 2; 399 - ns_init_card_error(card, error); 400 - return error; 401 - } 402 - cards[i] = card; 403 - spin_lock_init(&card->int_lock); 404 - spin_lock_init(&card->res_lock); 405 - 406 - 
pci_set_drvdata(pcidev, card); 407 - 408 - card->index = i; 409 - card->atmdev = NULL; 410 - card->pcidev = pcidev; 411 - membase = pci_resource_start(pcidev, 1); 412 - card->membase = ioremap(membase, NS_IOREMAP_SIZE); 413 - if (!card->membase) 414 - { 415 - printk("nicstar%d: can't ioremap() membase.\n",i); 416 - error = 3; 417 - ns_init_card_error(card, error); 418 - return error; 419 - } 420 - PRINTK("nicstar%d: membase at 0x%x.\n", i, card->membase); 381 + if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) { 382 + printk 383 + ("nicstar%d: can't allocate memory for device structure.\n", 384 + i); 385 + error = 2; 386 + ns_init_card_error(card, error); 387 + return error; 388 + } 389 + cards[i] = card; 390 + spin_lock_init(&card->int_lock); 391 + spin_lock_init(&card->res_lock); 421 392 422 - pci_set_master(pcidev); 393 + pci_set_drvdata(pcidev, card); 423 394 424 - if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) 425 - { 426 - printk("nicstar%d: can't read PCI latency timer.\n", i); 427 - error = 6; 428 - ns_init_card_error(card, error); 429 - return error; 430 - } 395 + card->index = i; 396 + card->atmdev = NULL; 397 + card->pcidev = pcidev; 398 + membase = pci_resource_start(pcidev, 1); 399 + card->membase = ioremap(membase, NS_IOREMAP_SIZE); 400 + if (!card->membase) { 401 + printk("nicstar%d: can't ioremap() membase.\n", i); 402 + error = 3; 403 + ns_init_card_error(card, error); 404 + return error; 405 + } 406 + PRINTK("nicstar%d: membase at 0x%x.\n", i, card->membase); 407 + 408 + pci_set_master(pcidev); 409 + 410 + if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) { 411 + printk("nicstar%d: can't read PCI latency timer.\n", i); 412 + error = 6; 413 + ns_init_card_error(card, error); 414 + return error; 415 + } 431 416 #ifdef NS_PCI_LATENCY 432 - if (pci_latency < NS_PCI_LATENCY) 433 - { 434 - PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY); 435 - for (j = 1; j < 4; j++) 436 - 
{ 437 - if (pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0) 438 - break; 439 - } 440 - if (j == 4) 441 - { 442 - printk("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY); 443 - error = 7; 444 - ns_init_card_error(card, error); 445 - return error; 446 - } 447 - } 417 + if (pci_latency < NS_PCI_LATENCY) { 418 + PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, 419 + NS_PCI_LATENCY); 420 + for (j = 1; j < 4; j++) { 421 + if (pci_write_config_byte 422 + (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0) 423 + break; 424 + } 425 + if (j == 4) { 426 + printk 427 + ("nicstar%d: can't set PCI latency timer to %d.\n", 428 + i, NS_PCI_LATENCY); 429 + error = 7; 430 + ns_init_card_error(card, error); 431 + return error; 432 + } 433 + } 448 434 #endif /* NS_PCI_LATENCY */ 449 - 450 - /* Clear timer overflow */ 451 - data = readl(card->membase + STAT); 452 - if (data & NS_STAT_TMROF) 453 - writel(NS_STAT_TMROF, card->membase + STAT); 454 435 455 - /* Software reset */ 456 - writel(NS_CFG_SWRST, card->membase + CFG); 457 - NS_DELAY; 458 - writel(0x00000000, card->membase + CFG); 436 + /* Clear timer overflow */ 437 + data = readl(card->membase + STAT); 438 + if (data & NS_STAT_TMROF) 439 + writel(NS_STAT_TMROF, card->membase + STAT); 459 440 460 - /* PHY reset */ 461 - writel(0x00000008, card->membase + GP); 462 - NS_DELAY; 463 - writel(0x00000001, card->membase + GP); 464 - NS_DELAY; 465 - while (CMD_BUSY(card)); 466 - writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */ 467 - NS_DELAY; 468 - 469 - /* Detect PHY type */ 470 - while (CMD_BUSY(card)); 471 - writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); 472 - while (CMD_BUSY(card)); 473 - data = readl(card->membase + DR0); 474 - switch(data) { 475 - case 0x00000009: 476 - printk("nicstar%d: PHY seems to be 25 Mbps.\n", i); 477 - card->max_pcr = ATM_25_PCR; 478 - while(CMD_BUSY(card)); 479 - writel(0x00000008, 
card->membase + DR0); 480 - writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); 481 - /* Clear an eventual pending interrupt */ 482 - writel(NS_STAT_SFBQF, card->membase + STAT); 441 + /* Software reset */ 442 + writel(NS_CFG_SWRST, card->membase + CFG); 443 + NS_DELAY; 444 + writel(0x00000000, card->membase + CFG); 445 + 446 + /* PHY reset */ 447 + writel(0x00000008, card->membase + GP); 448 + NS_DELAY; 449 + writel(0x00000001, card->membase + GP); 450 + NS_DELAY; 451 + while (CMD_BUSY(card)) ; 452 + writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */ 453 + NS_DELAY; 454 + 455 + /* Detect PHY type */ 456 + while (CMD_BUSY(card)) ; 457 + writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); 458 + while (CMD_BUSY(card)) ; 459 + data = readl(card->membase + DR0); 460 + switch (data) { 461 + case 0x00000009: 462 + printk("nicstar%d: PHY seems to be 25 Mbps.\n", i); 463 + card->max_pcr = ATM_25_PCR; 464 + while (CMD_BUSY(card)) ; 465 + writel(0x00000008, card->membase + DR0); 466 + writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); 467 + /* Clear an eventual pending interrupt */ 468 + writel(NS_STAT_SFBQF, card->membase + STAT); 483 469 #ifdef PHY_LOOPBACK 484 - while(CMD_BUSY(card)); 485 - writel(0x00000022, card->membase + DR0); 486 - writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD); 470 + while (CMD_BUSY(card)) ; 471 + writel(0x00000022, card->membase + DR0); 472 + writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD); 487 473 #endif /* PHY_LOOPBACK */ 488 - break; 489 - case 0x00000030: 490 - case 0x00000031: 491 - printk("nicstar%d: PHY seems to be 155 Mbps.\n", i); 492 - card->max_pcr = ATM_OC3_PCR; 474 + break; 475 + case 0x00000030: 476 + case 0x00000031: 477 + printk("nicstar%d: PHY seems to be 155 Mbps.\n", i); 478 + card->max_pcr = ATM_OC3_PCR; 493 479 #ifdef PHY_LOOPBACK 494 - while(CMD_BUSY(card)); 495 - writel(0x00000002, card->membase + DR0); 496 - 
writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); 480 + while (CMD_BUSY(card)) ; 481 + writel(0x00000002, card->membase + DR0); 482 + writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); 497 483 #endif /* PHY_LOOPBACK */ 498 - break; 499 - default: 500 - printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data); 501 - error = 8; 502 - ns_init_card_error(card, error); 503 - return error; 504 - } 505 - writel(0x00000000, card->membase + GP); 484 + break; 485 + default: 486 + printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data); 487 + error = 8; 488 + ns_init_card_error(card, error); 489 + return error; 490 + } 491 + writel(0x00000000, card->membase + GP); 506 492 507 - /* Determine SRAM size */ 508 - data = 0x76543210; 509 - ns_write_sram(card, 0x1C003, &data, 1); 510 - data = 0x89ABCDEF; 511 - ns_write_sram(card, 0x14003, &data, 1); 512 - if (ns_read_sram(card, 0x14003) == 0x89ABCDEF && 513 - ns_read_sram(card, 0x1C003) == 0x76543210) 514 - card->sram_size = 128; 515 - else 516 - card->sram_size = 32; 517 - PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); 493 + /* Determine SRAM size */ 494 + data = 0x76543210; 495 + ns_write_sram(card, 0x1C003, &data, 1); 496 + data = 0x89ABCDEF; 497 + ns_write_sram(card, 0x14003, &data, 1); 498 + if (ns_read_sram(card, 0x14003) == 0x89ABCDEF && 499 + ns_read_sram(card, 0x1C003) == 0x76543210) 500 + card->sram_size = 128; 501 + else 502 + card->sram_size = 32; 503 + PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); 518 504 519 - card->rct_size = NS_MAX_RCTSIZE; 505 + card->rct_size = NS_MAX_RCTSIZE; 520 506 521 507 #if (NS_MAX_RCTSIZE == 4096) 522 - if (card->sram_size == 128) 523 - printk("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i); 508 + if (card->sram_size == 128) 509 + printk 510 + ("nicstar%d: limiting maximum VCI. 
See NS_MAX_RCTSIZE in nicstar.h\n", 511 + i); 524 512 #elif (NS_MAX_RCTSIZE == 16384) 525 - if (card->sram_size == 32) 526 - { 527 - printk("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", i); 528 - card->rct_size = 4096; 529 - } 513 + if (card->sram_size == 32) { 514 + printk 515 + ("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", 516 + i); 517 + card->rct_size = 4096; 518 + } 530 519 #else 531 520 #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c 532 521 #endif 533 522 534 - card->vpibits = NS_VPIBITS; 535 - if (card->rct_size == 4096) 536 - card->vcibits = 12 - NS_VPIBITS; 537 - else /* card->rct_size == 16384 */ 538 - card->vcibits = 14 - NS_VPIBITS; 523 + card->vpibits = NS_VPIBITS; 524 + if (card->rct_size == 4096) 525 + card->vcibits = 12 - NS_VPIBITS; 526 + else /* card->rct_size == 16384 */ 527 + card->vcibits = 14 - NS_VPIBITS; 539 528 540 - /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */ 541 - if (mac[i] == NULL) 542 - nicstar_init_eprom(card->membase); 529 + /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */ 530 + if (mac[i] == NULL) 531 + nicstar_init_eprom(card->membase); 543 532 544 - /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ 545 - writel(0x00000000, card->membase + VPM); 546 - 547 - /* Initialize TSQ */ 548 - card->tsq.org = kmalloc(NS_TSQSIZE + NS_TSQ_ALIGNMENT, GFP_KERNEL); 549 - if (card->tsq.org == NULL) 550 - { 551 - printk("nicstar%d: can't allocate TSQ.\n", i); 552 - error = 10; 553 - ns_init_card_error(card, error); 554 - return error; 555 - } 556 - card->tsq.base = (ns_tsi *) ALIGN_ADDRESS(card->tsq.org, NS_TSQ_ALIGNMENT); 557 - card->tsq.next = card->tsq.base; 558 - card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); 559 - for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++) 560 - ns_tsi_init(card->tsq.base + j); 561 - writel(0x00000000, card->membase + TSQH); 562 - writel((u32) virt_to_bus(card->tsq.base), card->membase + TSQB); 563 - 
PRINTK("nicstar%d: TSQ base at 0x%x 0x%x 0x%x.\n", i, (u32) card->tsq.base, 564 - (u32) virt_to_bus(card->tsq.base), readl(card->membase + TSQB)); 565 - 566 - /* Initialize RSQ */ 567 - card->rsq.org = kmalloc(NS_RSQSIZE + NS_RSQ_ALIGNMENT, GFP_KERNEL); 568 - if (card->rsq.org == NULL) 569 - { 570 - printk("nicstar%d: can't allocate RSQ.\n", i); 571 - error = 11; 572 - ns_init_card_error(card, error); 573 - return error; 574 - } 575 - card->rsq.base = (ns_rsqe *) ALIGN_ADDRESS(card->rsq.org, NS_RSQ_ALIGNMENT); 576 - card->rsq.next = card->rsq.base; 577 - card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); 578 - for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++) 579 - ns_rsqe_init(card->rsq.base + j); 580 - writel(0x00000000, card->membase + RSQH); 581 - writel((u32) virt_to_bus(card->rsq.base), card->membase + RSQB); 582 - PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base); 583 - 584 - /* Initialize SCQ0, the only VBR SCQ used */ 585 - card->scq1 = NULL; 586 - card->scq2 = NULL; 587 - card->scq0 = get_scq(VBR_SCQSIZE, NS_VRSCD0); 588 - if (card->scq0 == NULL) 589 - { 590 - printk("nicstar%d: can't get SCQ0.\n", i); 591 - error = 12; 592 - ns_init_card_error(card, error); 593 - return error; 594 - } 595 - u32d[0] = (u32) virt_to_bus(card->scq0->base); 596 - u32d[1] = (u32) 0x00000000; 597 - u32d[2] = (u32) 0xffffffff; 598 - u32d[3] = (u32) 0x00000000; 599 - ns_write_sram(card, NS_VRSCD0, u32d, 4); 600 - ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */ 601 - ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... 
*/ 602 - card->scq0->scd = NS_VRSCD0; 603 - PRINTK("nicstar%d: VBR-SCQ0 base at 0x%x.\n", i, (u32) card->scq0->base); 533 + /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ 534 + writel(0x00000000, card->membase + VPM); 604 535 605 - /* Initialize TSTs */ 606 - card->tst_addr = NS_TST0; 607 - card->tst_free_entries = NS_TST_NUM_ENTRIES; 608 - data = NS_TST_OPCODE_VARIABLE; 609 - for (j = 0; j < NS_TST_NUM_ENTRIES; j++) 610 - ns_write_sram(card, NS_TST0 + j, &data, 1); 611 - data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0); 612 - ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1); 613 - for (j = 0; j < NS_TST_NUM_ENTRIES; j++) 614 - ns_write_sram(card, NS_TST1 + j, &data, 1); 615 - data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1); 616 - ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1); 617 - for (j = 0; j < NS_TST_NUM_ENTRIES; j++) 618 - card->tste2vc[j] = NULL; 619 - writel(NS_TST0 << 2, card->membase + TSTB); 536 + /* Initialize TSQ */ 537 + card->tsq.org = kmalloc(NS_TSQSIZE + NS_TSQ_ALIGNMENT, GFP_KERNEL); 538 + if (card->tsq.org == NULL) { 539 + printk("nicstar%d: can't allocate TSQ.\n", i); 540 + error = 10; 541 + ns_init_card_error(card, error); 542 + return error; 543 + } 544 + card->tsq.base = 545 + (ns_tsi *) ALIGN_ADDRESS(card->tsq.org, NS_TSQ_ALIGNMENT); 546 + card->tsq.next = card->tsq.base; 547 + card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); 548 + for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++) 549 + ns_tsi_init(card->tsq.base + j); 550 + writel(0x00000000, card->membase + TSQH); 551 + writel((u32) virt_to_bus(card->tsq.base), card->membase + TSQB); 552 + PRINTK("nicstar%d: TSQ base at 0x%x 0x%x 0x%x.\n", i, 553 + (u32) card->tsq.base, (u32) virt_to_bus(card->tsq.base), 554 + readl(card->membase + TSQB)); 620 555 556 + /* Initialize RSQ */ 557 + card->rsq.org = kmalloc(NS_RSQSIZE + NS_RSQ_ALIGNMENT, GFP_KERNEL); 558 + if (card->rsq.org == NULL) { 559 + printk("nicstar%d: can't allocate RSQ.\n", i); 560 + error 
= 11; 561 + ns_init_card_error(card, error); 562 + return error; 563 + } 564 + card->rsq.base = 565 + (ns_rsqe *) ALIGN_ADDRESS(card->rsq.org, NS_RSQ_ALIGNMENT); 566 + card->rsq.next = card->rsq.base; 567 + card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); 568 + for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++) 569 + ns_rsqe_init(card->rsq.base + j); 570 + writel(0x00000000, card->membase + RSQH); 571 + writel((u32) virt_to_bus(card->rsq.base), card->membase + RSQB); 572 + PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base); 621 573 622 - /* Initialize RCT. AAL type is set on opening the VC. */ 574 + /* Initialize SCQ0, the only VBR SCQ used */ 575 + card->scq1 = NULL; 576 + card->scq2 = NULL; 577 + card->scq0 = get_scq(VBR_SCQSIZE, NS_VRSCD0); 578 + if (card->scq0 == NULL) { 579 + printk("nicstar%d: can't get SCQ0.\n", i); 580 + error = 12; 581 + ns_init_card_error(card, error); 582 + return error; 583 + } 584 + u32d[0] = (u32) virt_to_bus(card->scq0->base); 585 + u32d[1] = (u32) 0x00000000; 586 + u32d[2] = (u32) 0xffffffff; 587 + u32d[3] = (u32) 0x00000000; 588 + ns_write_sram(card, NS_VRSCD0, u32d, 4); 589 + ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */ 590 + ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... 
*/ 591 + card->scq0->scd = NS_VRSCD0; 592 + PRINTK("nicstar%d: VBR-SCQ0 base at 0x%x.\n", i, 593 + (u32) card->scq0->base); 594 + 595 + /* Initialize TSTs */ 596 + card->tst_addr = NS_TST0; 597 + card->tst_free_entries = NS_TST_NUM_ENTRIES; 598 + data = NS_TST_OPCODE_VARIABLE; 599 + for (j = 0; j < NS_TST_NUM_ENTRIES; j++) 600 + ns_write_sram(card, NS_TST0 + j, &data, 1); 601 + data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0); 602 + ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1); 603 + for (j = 0; j < NS_TST_NUM_ENTRIES; j++) 604 + ns_write_sram(card, NS_TST1 + j, &data, 1); 605 + data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1); 606 + ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1); 607 + for (j = 0; j < NS_TST_NUM_ENTRIES; j++) 608 + card->tste2vc[j] = NULL; 609 + writel(NS_TST0 << 2, card->membase + TSTB); 610 + 611 + /* Initialize RCT. AAL type is set on opening the VC. */ 623 612 #ifdef RCQ_SUPPORT 624 - u32d[0] = NS_RCTE_RAWCELLINTEN; 613 + u32d[0] = NS_RCTE_RAWCELLINTEN; 625 614 #else 626 - u32d[0] = 0x00000000; 615 + u32d[0] = 0x00000000; 627 616 #endif /* RCQ_SUPPORT */ 628 - u32d[1] = 0x00000000; 629 - u32d[2] = 0x00000000; 630 - u32d[3] = 0xFFFFFFFF; 631 - for (j = 0; j < card->rct_size; j++) 632 - ns_write_sram(card, j * 4, u32d, 4); 633 - 634 - memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map)); 635 - 636 - for (j = 0; j < NS_FRSCD_NUM; j++) 637 - card->scd2vc[j] = NULL; 617 + u32d[1] = 0x00000000; 618 + u32d[2] = 0x00000000; 619 + u32d[3] = 0xFFFFFFFF; 620 + for (j = 0; j < card->rct_size; j++) 621 + ns_write_sram(card, j * 4, u32d, 4); 638 622 639 - /* Initialize buffer levels */ 640 - card->sbnr.min = MIN_SB; 641 - card->sbnr.init = NUM_SB; 642 - card->sbnr.max = MAX_SB; 643 - card->lbnr.min = MIN_LB; 644 - card->lbnr.init = NUM_LB; 645 - card->lbnr.max = MAX_LB; 646 - card->iovnr.min = MIN_IOVB; 647 - card->iovnr.init = NUM_IOVB; 648 - card->iovnr.max = MAX_IOVB; 649 - card->hbnr.min = MIN_HB; 650 - card->hbnr.init = 
NUM_HB; 651 - card->hbnr.max = MAX_HB; 652 - 653 - card->sm_handle = 0x00000000; 654 - card->sm_addr = 0x00000000; 655 - card->lg_handle = 0x00000000; 656 - card->lg_addr = 0x00000000; 657 - 658 - card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ 623 + memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map)); 659 624 660 - /* Pre-allocate some huge buffers */ 661 - skb_queue_head_init(&card->hbpool.queue); 662 - card->hbpool.count = 0; 663 - for (j = 0; j < NUM_HB; j++) 664 - { 665 - struct sk_buff *hb; 666 - hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); 667 - if (hb == NULL) 668 - { 669 - printk("nicstar%d: can't allocate %dth of %d huge buffers.\n", 670 - i, j, NUM_HB); 671 - error = 13; 672 - ns_init_card_error(card, error); 673 - return error; 674 - } 675 - NS_SKB_CB(hb)->buf_type = BUF_NONE; 676 - skb_queue_tail(&card->hbpool.queue, hb); 677 - card->hbpool.count++; 678 - } 625 + for (j = 0; j < NS_FRSCD_NUM; j++) 626 + card->scd2vc[j] = NULL; 679 627 628 + /* Initialize buffer levels */ 629 + card->sbnr.min = MIN_SB; 630 + card->sbnr.init = NUM_SB; 631 + card->sbnr.max = MAX_SB; 632 + card->lbnr.min = MIN_LB; 633 + card->lbnr.init = NUM_LB; 634 + card->lbnr.max = MAX_LB; 635 + card->iovnr.min = MIN_IOVB; 636 + card->iovnr.init = NUM_IOVB; 637 + card->iovnr.max = MAX_IOVB; 638 + card->hbnr.min = MIN_HB; 639 + card->hbnr.init = NUM_HB; 640 + card->hbnr.max = MAX_HB; 680 641 681 - /* Allocate large buffers */ 682 - skb_queue_head_init(&card->lbpool.queue); 683 - card->lbpool.count = 0; /* Not used */ 684 - for (j = 0; j < NUM_LB; j++) 685 - { 686 - struct sk_buff *lb; 687 - lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); 688 - if (lb == NULL) 689 - { 690 - printk("nicstar%d: can't allocate %dth of %d large buffers.\n", 691 - i, j, NUM_LB); 692 - error = 14; 693 - ns_init_card_error(card, error); 694 - return error; 695 - } 696 - NS_SKB_CB(lb)->buf_type = BUF_LG; 697 - skb_queue_tail(&card->lbpool.queue, lb); 698 - skb_reserve(lb, 
NS_SMBUFSIZE); 699 - push_rxbufs(card, lb); 700 - /* Due to the implementation of push_rxbufs() this is 1, not 0 */ 701 - if (j == 1) 702 - { 703 - card->rcbuf = lb; 704 - card->rawch = (u32) virt_to_bus(lb->data); 705 - } 706 - } 707 - /* Test for strange behaviour which leads to crashes */ 708 - if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) 709 - { 710 - printk("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n", 711 - i, j, bcount); 712 - error = 14; 713 - ns_init_card_error(card, error); 714 - return error; 715 - } 716 - 642 + card->sm_handle = 0x00000000; 643 + card->sm_addr = 0x00000000; 644 + card->lg_handle = 0x00000000; 645 + card->lg_addr = 0x00000000; 717 646 718 - /* Allocate small buffers */ 719 - skb_queue_head_init(&card->sbpool.queue); 720 - card->sbpool.count = 0; /* Not used */ 721 - for (j = 0; j < NUM_SB; j++) 722 - { 723 - struct sk_buff *sb; 724 - sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); 725 - if (sb == NULL) 726 - { 727 - printk("nicstar%d: can't allocate %dth of %d small buffers.\n", 728 - i, j, NUM_SB); 729 - error = 15; 730 - ns_init_card_error(card, error); 731 - return error; 732 - } 733 - NS_SKB_CB(sb)->buf_type = BUF_SM; 734 - skb_queue_tail(&card->sbpool.queue, sb); 735 - skb_reserve(sb, NS_AAL0_HEADER); 736 - push_rxbufs(card, sb); 737 - } 738 - /* Test for strange behaviour which leads to crashes */ 739 - if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) 740 - { 741 - printk("nicstar%d: Strange... 
Just allocated %d small buffers and sfbqc = %d.\n", 742 - i, j, bcount); 743 - error = 15; 744 - ns_init_card_error(card, error); 745 - return error; 746 - } 747 - 647 + card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ 748 648 749 - /* Allocate iovec buffers */ 750 - skb_queue_head_init(&card->iovpool.queue); 751 - card->iovpool.count = 0; 752 - for (j = 0; j < NUM_IOVB; j++) 753 - { 754 - struct sk_buff *iovb; 755 - iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); 756 - if (iovb == NULL) 757 - { 758 - printk("nicstar%d: can't allocate %dth of %d iovec buffers.\n", 759 - i, j, NUM_IOVB); 760 - error = 16; 761 - ns_init_card_error(card, error); 762 - return error; 763 - } 764 - NS_SKB_CB(iovb)->buf_type = BUF_NONE; 765 - skb_queue_tail(&card->iovpool.queue, iovb); 766 - card->iovpool.count++; 767 - } 649 + /* Pre-allocate some huge buffers */ 650 + skb_queue_head_init(&card->hbpool.queue); 651 + card->hbpool.count = 0; 652 + for (j = 0; j < NUM_HB; j++) { 653 + struct sk_buff *hb; 654 + hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); 655 + if (hb == NULL) { 656 + printk 657 + ("nicstar%d: can't allocate %dth of %d huge buffers.\n", 658 + i, j, NUM_HB); 659 + error = 13; 660 + ns_init_card_error(card, error); 661 + return error; 662 + } 663 + NS_SKB_CB(hb)->buf_type = BUF_NONE; 664 + skb_queue_tail(&card->hbpool.queue, hb); 665 + card->hbpool.count++; 666 + } 768 667 769 - /* Configure NICStAR */ 770 - if (card->rct_size == 4096) 771 - ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; 772 - else /* (card->rct_size == 16384) */ 773 - ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES; 668 + /* Allocate large buffers */ 669 + skb_queue_head_init(&card->lbpool.queue); 670 + card->lbpool.count = 0; /* Not used */ 671 + for (j = 0; j < NUM_LB; j++) { 672 + struct sk_buff *lb; 673 + lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); 674 + if (lb == NULL) { 675 + printk 676 + ("nicstar%d: can't allocate %dth of %d large buffers.\n", 677 + i, j, NUM_LB); 678 + error 
= 14; 679 + ns_init_card_error(card, error); 680 + return error; 681 + } 682 + NS_SKB_CB(lb)->buf_type = BUF_LG; 683 + skb_queue_tail(&card->lbpool.queue, lb); 684 + skb_reserve(lb, NS_SMBUFSIZE); 685 + push_rxbufs(card, lb); 686 + /* Due to the implementation of push_rxbufs() this is 1, not 0 */ 687 + if (j == 1) { 688 + card->rcbuf = lb; 689 + card->rawch = (u32) virt_to_bus(lb->data); 690 + } 691 + } 692 + /* Test for strange behaviour which leads to crashes */ 693 + if ((bcount = 694 + ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) { 695 + printk 696 + ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n", 697 + i, j, bcount); 698 + error = 14; 699 + ns_init_card_error(card, error); 700 + return error; 701 + } 774 702 775 - card->efbie = 1; 703 + /* Allocate small buffers */ 704 + skb_queue_head_init(&card->sbpool.queue); 705 + card->sbpool.count = 0; /* Not used */ 706 + for (j = 0; j < NUM_SB; j++) { 707 + struct sk_buff *sb; 708 + sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); 709 + if (sb == NULL) { 710 + printk 711 + ("nicstar%d: can't allocate %dth of %d small buffers.\n", 712 + i, j, NUM_SB); 713 + error = 15; 714 + ns_init_card_error(card, error); 715 + return error; 716 + } 717 + NS_SKB_CB(sb)->buf_type = BUF_SM; 718 + skb_queue_tail(&card->sbpool.queue, sb); 719 + skb_reserve(sb, NS_AAL0_HEADER); 720 + push_rxbufs(card, sb); 721 + } 722 + /* Test for strange behaviour which leads to crashes */ 723 + if ((bcount = 724 + ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) { 725 + printk 726 + ("nicstar%d: Strange... 
Just allocated %d small buffers and sfbqc = %d.\n", 727 + i, j, bcount); 728 + error = 15; 729 + ns_init_card_error(card, error); 730 + return error; 731 + } 776 732 777 - card->intcnt = 0; 778 - if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0) 779 - { 780 - printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); 781 - error = 9; 782 - ns_init_card_error(card, error); 783 - return error; 784 - } 733 + /* Allocate iovec buffers */ 734 + skb_queue_head_init(&card->iovpool.queue); 735 + card->iovpool.count = 0; 736 + for (j = 0; j < NUM_IOVB; j++) { 737 + struct sk_buff *iovb; 738 + iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); 739 + if (iovb == NULL) { 740 + printk 741 + ("nicstar%d: can't allocate %dth of %d iovec buffers.\n", 742 + i, j, NUM_IOVB); 743 + error = 16; 744 + ns_init_card_error(card, error); 745 + return error; 746 + } 747 + NS_SKB_CB(iovb)->buf_type = BUF_NONE; 748 + skb_queue_tail(&card->iovpool.queue, iovb); 749 + card->iovpool.count++; 750 + } 785 751 786 - /* Register device */ 787 - card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL); 788 - if (card->atmdev == NULL) 789 - { 790 - printk("nicstar%d: can't register device.\n", i); 791 - error = 17; 792 - ns_init_card_error(card, error); 793 - return error; 794 - } 795 - 796 - if (ns_parse_mac(mac[i], card->atmdev->esi)) { 797 - nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, 798 - card->atmdev->esi, 6); 799 - if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == 0) { 800 - nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT, 801 - card->atmdev->esi, 6); 802 - } 803 - } 752 + /* Configure NICStAR */ 753 + if (card->rct_size == 4096) 754 + ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; 755 + else /* (card->rct_size == 16384) */ 756 + ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES; 804 757 805 - printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); 758 + card->efbie = 1; 806 759 807 - 
card->atmdev->dev_data = card; 808 - card->atmdev->ci_range.vpi_bits = card->vpibits; 809 - card->atmdev->ci_range.vci_bits = card->vcibits; 810 - card->atmdev->link_rate = card->max_pcr; 811 - card->atmdev->phy = NULL; 760 + card->intcnt = 0; 761 + if (request_irq 762 + (pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, 763 + "nicstar", card) != 0) { 764 + printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); 765 + error = 9; 766 + ns_init_card_error(card, error); 767 + return error; 768 + } 769 + 770 + /* Register device */ 771 + card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL); 772 + if (card->atmdev == NULL) { 773 + printk("nicstar%d: can't register device.\n", i); 774 + error = 17; 775 + ns_init_card_error(card, error); 776 + return error; 777 + } 778 + 779 + if (ns_parse_mac(mac[i], card->atmdev->esi)) { 780 + nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, 781 + card->atmdev->esi, 6); 782 + if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == 783 + 0) { 784 + nicstar_read_eprom(card->membase, 785 + NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT, 786 + card->atmdev->esi, 6); 787 + } 788 + } 789 + 790 + printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); 791 + 792 + card->atmdev->dev_data = card; 793 + card->atmdev->ci_range.vpi_bits = card->vpibits; 794 + card->atmdev->ci_range.vci_bits = card->vcibits; 795 + card->atmdev->link_rate = card->max_pcr; 796 + card->atmdev->phy = NULL; 812 797 813 798 #ifdef CONFIG_ATM_NICSTAR_USE_SUNI 814 - if (card->max_pcr == ATM_OC3_PCR) 815 - suni_init(card->atmdev); 799 + if (card->max_pcr == ATM_OC3_PCR) 800 + suni_init(card->atmdev); 816 801 #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ 817 802 818 803 #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 819 - if (card->max_pcr == ATM_25_PCR) 820 - idt77105_init(card->atmdev); 804 + if (card->max_pcr == ATM_25_PCR) 805 + idt77105_init(card->atmdev); 821 806 #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ 822 807 823 - if 
(card->atmdev->phy && card->atmdev->phy->start) 824 - card->atmdev->phy->start(card->atmdev); 808 + if (card->atmdev->phy && card->atmdev->phy->start) 809 + card->atmdev->phy->start(card->atmdev); 825 810 826 - writel(NS_CFG_RXPATH | 827 - NS_CFG_SMBUFSIZE | 828 - NS_CFG_LGBUFSIZE | 829 - NS_CFG_EFBIE | 830 - NS_CFG_RSQSIZE | 831 - NS_CFG_VPIBITS | 832 - ns_cfg_rctsize | 833 - NS_CFG_RXINT_NODELAY | 834 - NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */ 835 - NS_CFG_RSQAFIE | 836 - NS_CFG_TXEN | 837 - NS_CFG_TXIE | 838 - NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */ 839 - NS_CFG_PHYIE, 840 - card->membase + CFG); 811 + writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */ 812 + NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */ 813 + NS_CFG_PHYIE, card->membase + CFG); 841 814 842 - num_cards++; 815 + num_cards++; 843 816 844 - return error; 817 + return error; 845 818 } 846 819 847 - 848 - 849 - static void __devinit ns_init_card_error(ns_dev *card, int error) 820 + static void __devinit ns_init_card_error(ns_dev * card, int error) 850 821 { 851 - if (error >= 17) 852 - { 853 - writel(0x00000000, card->membase + CFG); 854 - } 855 - if (error >= 16) 856 - { 857 - struct sk_buff *iovb; 858 - while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) 859 - dev_kfree_skb_any(iovb); 860 - } 861 - if (error >= 15) 862 - { 863 - struct sk_buff *sb; 864 - while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) 865 - dev_kfree_skb_any(sb); 866 - free_scq(card->scq0, NULL); 867 - } 868 - if (error >= 14) 869 - { 870 - struct sk_buff *lb; 871 - while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) 872 - dev_kfree_skb_any(lb); 873 - } 874 - if (error >= 13) 875 - { 876 - struct sk_buff *hb; 877 - while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) 878 - 
dev_kfree_skb_any(hb); 879 - } 880 - if (error >= 12) 881 - { 882 - kfree(card->rsq.org); 883 - } 884 - if (error >= 11) 885 - { 886 - kfree(card->tsq.org); 887 - } 888 - if (error >= 10) 889 - { 890 - free_irq(card->pcidev->irq, card); 891 - } 892 - if (error >= 4) 893 - { 894 - iounmap(card->membase); 895 - } 896 - if (error >= 3) 897 - { 898 - pci_disable_device(card->pcidev); 899 - kfree(card); 900 - } 822 + if (error >= 17) { 823 + writel(0x00000000, card->membase + CFG); 824 + } 825 + if (error >= 16) { 826 + struct sk_buff *iovb; 827 + while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) 828 + dev_kfree_skb_any(iovb); 829 + } 830 + if (error >= 15) { 831 + struct sk_buff *sb; 832 + while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) 833 + dev_kfree_skb_any(sb); 834 + free_scq(card->scq0, NULL); 835 + } 836 + if (error >= 14) { 837 + struct sk_buff *lb; 838 + while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) 839 + dev_kfree_skb_any(lb); 840 + } 841 + if (error >= 13) { 842 + struct sk_buff *hb; 843 + while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) 844 + dev_kfree_skb_any(hb); 845 + } 846 + if (error >= 12) { 847 + kfree(card->rsq.org); 848 + } 849 + if (error >= 11) { 850 + kfree(card->tsq.org); 851 + } 852 + if (error >= 10) { 853 + free_irq(card->pcidev->irq, card); 854 + } 855 + if (error >= 4) { 856 + iounmap(card->membase); 857 + } 858 + if (error >= 3) { 859 + pci_disable_device(card->pcidev); 860 + kfree(card); 861 + } 901 862 } 902 - 903 - 904 863 905 864 static scq_info *get_scq(int size, u32 scd) 906 865 { 907 - scq_info *scq; 908 - int i; 866 + scq_info *scq; 867 + int i; 909 868 910 - if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) 911 - return NULL; 869 + if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) 870 + return NULL; 912 871 913 - scq = kmalloc(sizeof(scq_info), GFP_KERNEL); 914 - if (scq == NULL) 915 - return NULL; 916 - scq->org = kmalloc(2 * size, GFP_KERNEL); 917 - if (scq->org == NULL) 918 - { 919 - kfree(scq); 
920 - return NULL; 921 - } 922 - scq->skb = kmalloc(sizeof(struct sk_buff *) * 923 - (size / NS_SCQE_SIZE), GFP_KERNEL); 924 - if (scq->skb == NULL) 925 - { 926 - kfree(scq->org); 927 - kfree(scq); 928 - return NULL; 929 - } 930 - scq->num_entries = size / NS_SCQE_SIZE; 931 - scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size); 932 - scq->next = scq->base; 933 - scq->last = scq->base + (scq->num_entries - 1); 934 - scq->tail = scq->last; 935 - scq->scd = scd; 936 - scq->num_entries = size / NS_SCQE_SIZE; 937 - scq->tbd_count = 0; 938 - init_waitqueue_head(&scq->scqfull_waitq); 939 - scq->full = 0; 940 - spin_lock_init(&scq->lock); 872 + scq = kmalloc(sizeof(scq_info), GFP_KERNEL); 873 + if (scq == NULL) 874 + return NULL; 875 + scq->org = kmalloc(2 * size, GFP_KERNEL); 876 + if (scq->org == NULL) { 877 + kfree(scq); 878 + return NULL; 879 + } 880 + scq->skb = kmalloc(sizeof(struct sk_buff *) * 881 + (size / NS_SCQE_SIZE), GFP_KERNEL); 882 + if (scq->skb == NULL) { 883 + kfree(scq->org); 884 + kfree(scq); 885 + return NULL; 886 + } 887 + scq->num_entries = size / NS_SCQE_SIZE; 888 + scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size); 889 + scq->next = scq->base; 890 + scq->last = scq->base + (scq->num_entries - 1); 891 + scq->tail = scq->last; 892 + scq->scd = scd; 893 + scq->num_entries = size / NS_SCQE_SIZE; 894 + scq->tbd_count = 0; 895 + init_waitqueue_head(&scq->scqfull_waitq); 896 + scq->full = 0; 897 + spin_lock_init(&scq->lock); 941 898 942 - for (i = 0; i < scq->num_entries; i++) 943 - scq->skb[i] = NULL; 899 + for (i = 0; i < scq->num_entries; i++) 900 + scq->skb[i] = NULL; 944 901 945 - return scq; 902 + return scq; 946 903 } 947 - 948 - 949 904 950 905 /* For variable rate SCQ vcc must be NULL */ 951 - static void free_scq(scq_info *scq, struct atm_vcc *vcc) 906 + static void free_scq(scq_info * scq, struct atm_vcc *vcc) 952 907 { 953 - int i; 908 + int i; 954 909 955 - if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) 956 - for (i = 0; i < 
scq->num_entries; i++) 957 - { 958 - if (scq->skb[i] != NULL) 959 - { 960 - vcc = ATM_SKB(scq->skb[i])->vcc; 961 - if (vcc->pop != NULL) 962 - vcc->pop(vcc, scq->skb[i]); 963 - else 964 - dev_kfree_skb_any(scq->skb[i]); 965 - } 966 - } 967 - else /* vcc must be != NULL */ 968 - { 969 - if (vcc == NULL) 970 - { 971 - printk("nicstar: free_scq() called with vcc == NULL for fixed rate scq."); 972 - for (i = 0; i < scq->num_entries; i++) 973 - dev_kfree_skb_any(scq->skb[i]); 974 - } 975 - else 976 - for (i = 0; i < scq->num_entries; i++) 977 - { 978 - if (scq->skb[i] != NULL) 979 - { 980 - if (vcc->pop != NULL) 981 - vcc->pop(vcc, scq->skb[i]); 982 - else 983 - dev_kfree_skb_any(scq->skb[i]); 984 - } 985 - } 986 - } 987 - kfree(scq->skb); 988 - kfree(scq->org); 989 - kfree(scq); 910 + if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) 911 + for (i = 0; i < scq->num_entries; i++) { 912 + if (scq->skb[i] != NULL) { 913 + vcc = ATM_SKB(scq->skb[i])->vcc; 914 + if (vcc->pop != NULL) 915 + vcc->pop(vcc, scq->skb[i]); 916 + else 917 + dev_kfree_skb_any(scq->skb[i]); 918 + } 919 + } else { /* vcc must be != NULL */ 920 + 921 + if (vcc == NULL) { 922 + printk 923 + ("nicstar: free_scq() called with vcc == NULL for fixed rate scq."); 924 + for (i = 0; i < scq->num_entries; i++) 925 + dev_kfree_skb_any(scq->skb[i]); 926 + } else 927 + for (i = 0; i < scq->num_entries; i++) { 928 + if (scq->skb[i] != NULL) { 929 + if (vcc->pop != NULL) 930 + vcc->pop(vcc, scq->skb[i]); 931 + else 932 + dev_kfree_skb_any(scq->skb[i]); 933 + } 934 + } 935 + } 936 + kfree(scq->skb); 937 + kfree(scq->org); 938 + kfree(scq); 990 939 } 991 - 992 - 993 940 994 941 /* The handles passed must be pointers to the sk_buff containing the small 995 942 or large buffer(s) cast to u32. 
*/ 996 - static void push_rxbufs(ns_dev *card, struct sk_buff *skb) 943 + static void push_rxbufs(ns_dev * card, struct sk_buff *skb) 997 944 { 998 - struct ns_skb_cb *cb = NS_SKB_CB(skb); 999 - u32 handle1, addr1; 1000 - u32 handle2, addr2; 1001 - u32 stat; 1002 - unsigned long flags; 1003 - 1004 - /* *BARF* */ 1005 - handle2 = addr2 = 0; 1006 - handle1 = (u32)skb; 1007 - addr1 = (u32)virt_to_bus(skb->data); 945 + struct ns_skb_cb *cb = NS_SKB_CB(skb); 946 + u32 handle1, addr1; 947 + u32 handle2, addr2; 948 + u32 stat; 949 + unsigned long flags; 950 + 951 + /* *BARF* */ 952 + handle2 = addr2 = 0; 953 + handle1 = (u32) skb; 954 + addr1 = (u32) virt_to_bus(skb->data); 1008 955 1009 956 #ifdef GENERAL_DEBUG 1010 - if (!addr1) 1011 - printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index); 957 + if (!addr1) 958 + printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", 959 + card->index); 1012 960 #endif /* GENERAL_DEBUG */ 1013 961 1014 - stat = readl(card->membase + STAT); 1015 - card->sbfqc = ns_stat_sfbqc_get(stat); 1016 - card->lbfqc = ns_stat_lfbqc_get(stat); 1017 - if (cb->buf_type == BUF_SM) 1018 - { 1019 - if (!addr2) 1020 - { 1021 - if (card->sm_addr) 1022 - { 1023 - addr2 = card->sm_addr; 1024 - handle2 = card->sm_handle; 1025 - card->sm_addr = 0x00000000; 1026 - card->sm_handle = 0x00000000; 1027 - } 1028 - else /* (!sm_addr) */ 1029 - { 1030 - card->sm_addr = addr1; 1031 - card->sm_handle = handle1; 1032 - } 1033 - } 1034 - } 1035 - else /* buf_type == BUF_LG */ 1036 - { 1037 - if (!addr2) 1038 - { 1039 - if (card->lg_addr) 1040 - { 1041 - addr2 = card->lg_addr; 1042 - handle2 = card->lg_handle; 1043 - card->lg_addr = 0x00000000; 1044 - card->lg_handle = 0x00000000; 1045 - } 1046 - else /* (!lg_addr) */ 1047 - { 1048 - card->lg_addr = addr1; 1049 - card->lg_handle = handle1; 1050 - } 1051 - } 1052 - } 962 + stat = readl(card->membase + STAT); 963 + card->sbfqc = ns_stat_sfbqc_get(stat); 964 + card->lbfqc = ns_stat_lfbqc_get(stat); 965 + if 
(cb->buf_type == BUF_SM) { 966 + if (!addr2) { 967 + if (card->sm_addr) { 968 + addr2 = card->sm_addr; 969 + handle2 = card->sm_handle; 970 + card->sm_addr = 0x00000000; 971 + card->sm_handle = 0x00000000; 972 + } else { /* (!sm_addr) */ 1053 973 1054 - if (addr2) 1055 - { 1056 - if (cb->buf_type == BUF_SM) 1057 - { 1058 - if (card->sbfqc >= card->sbnr.max) 1059 - { 1060 - skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue); 1061 - dev_kfree_skb_any((struct sk_buff *) handle1); 1062 - skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue); 1063 - dev_kfree_skb_any((struct sk_buff *) handle2); 1064 - return; 1065 - } 1066 - else 1067 - card->sbfqc += 2; 1068 - } 1069 - else /* (buf_type == BUF_LG) */ 1070 - { 1071 - if (card->lbfqc >= card->lbnr.max) 1072 - { 1073 - skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue); 1074 - dev_kfree_skb_any((struct sk_buff *) handle1); 1075 - skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue); 1076 - dev_kfree_skb_any((struct sk_buff *) handle2); 1077 - return; 1078 - } 1079 - else 1080 - card->lbfqc += 2; 1081 - } 974 + card->sm_addr = addr1; 975 + card->sm_handle = handle1; 976 + } 977 + } 978 + } else { /* buf_type == BUF_LG */ 1082 979 1083 - spin_lock_irqsave(&card->res_lock, flags); 980 + if (!addr2) { 981 + if (card->lg_addr) { 982 + addr2 = card->lg_addr; 983 + handle2 = card->lg_handle; 984 + card->lg_addr = 0x00000000; 985 + card->lg_handle = 0x00000000; 986 + } else { /* (!lg_addr) */ 1084 987 1085 - while (CMD_BUSY(card)); 1086 - writel(addr2, card->membase + DR3); 1087 - writel(handle2, card->membase + DR2); 1088 - writel(addr1, card->membase + DR1); 1089 - writel(handle1, card->membase + DR0); 1090 - writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD); 1091 - 1092 - spin_unlock_irqrestore(&card->res_lock, flags); 988 + card->lg_addr = addr1; 989 + card->lg_handle = handle1; 990 + } 991 + } 992 + } 1093 993 1094 - XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", 
card->index, 1095 - (cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2); 1096 - } 994 + if (addr2) { 995 + if (cb->buf_type == BUF_SM) { 996 + if (card->sbfqc >= card->sbnr.max) { 997 + skb_unlink((struct sk_buff *)handle1, 998 + &card->sbpool.queue); 999 + dev_kfree_skb_any((struct sk_buff *)handle1); 1000 + skb_unlink((struct sk_buff *)handle2, 1001 + &card->sbpool.queue); 1002 + dev_kfree_skb_any((struct sk_buff *)handle2); 1003 + return; 1004 + } else 1005 + card->sbfqc += 2; 1006 + } else { /* (buf_type == BUF_LG) */ 1097 1007 1098 - if (!card->efbie && card->sbfqc >= card->sbnr.min && 1099 - card->lbfqc >= card->lbnr.min) 1100 - { 1101 - card->efbie = 1; 1102 - writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG); 1103 - } 1008 + if (card->lbfqc >= card->lbnr.max) { 1009 + skb_unlink((struct sk_buff *)handle1, 1010 + &card->lbpool.queue); 1011 + dev_kfree_skb_any((struct sk_buff *)handle1); 1012 + skb_unlink((struct sk_buff *)handle2, 1013 + &card->lbpool.queue); 1014 + dev_kfree_skb_any((struct sk_buff *)handle2); 1015 + return; 1016 + } else 1017 + card->lbfqc += 2; 1018 + } 1104 1019 1105 - return; 1020 + spin_lock_irqsave(&card->res_lock, flags); 1021 + 1022 + while (CMD_BUSY(card)) ; 1023 + writel(addr2, card->membase + DR3); 1024 + writel(handle2, card->membase + DR2); 1025 + writel(addr1, card->membase + DR1); 1026 + writel(handle1, card->membase + DR0); 1027 + writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, 1028 + card->membase + CMD); 1029 + 1030 + spin_unlock_irqrestore(&card->res_lock, flags); 1031 + 1032 + XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", 1033 + card->index, 1034 + (cb->buf_type == BUF_SM ? 
"small" : "large"), addr1, 1035 + addr2); 1036 + } 1037 + 1038 + if (!card->efbie && card->sbfqc >= card->sbnr.min && 1039 + card->lbfqc >= card->lbnr.min) { 1040 + card->efbie = 1; 1041 + writel((readl(card->membase + CFG) | NS_CFG_EFBIE), 1042 + card->membase + CFG); 1043 + } 1044 + 1045 + return; 1106 1046 } 1107 - 1108 - 1109 1047 1110 1048 static irqreturn_t ns_irq_handler(int irq, void *dev_id) 1111 1049 { 1112 - u32 stat_r; 1113 - ns_dev *card; 1114 - struct atm_dev *dev; 1115 - unsigned long flags; 1050 + u32 stat_r; 1051 + ns_dev *card; 1052 + struct atm_dev *dev; 1053 + unsigned long flags; 1116 1054 1117 - card = (ns_dev *) dev_id; 1118 - dev = card->atmdev; 1119 - card->intcnt++; 1055 + card = (ns_dev *) dev_id; 1056 + dev = card->atmdev; 1057 + card->intcnt++; 1120 1058 1121 - PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); 1059 + PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); 1122 1060 1123 - spin_lock_irqsave(&card->int_lock, flags); 1124 - 1125 - stat_r = readl(card->membase + STAT); 1061 + spin_lock_irqsave(&card->int_lock, flags); 1126 1062 1127 - /* Transmit Status Indicator has been written to T. S. 
Queue */ 1128 - if (stat_r & NS_STAT_TSIF) 1129 - { 1130 - TXPRINTK("nicstar%d: TSI interrupt\n", card->index); 1131 - process_tsq(card); 1132 - writel(NS_STAT_TSIF, card->membase + STAT); 1133 - } 1134 - 1135 - /* Incomplete CS-PDU has been transmitted */ 1136 - if (stat_r & NS_STAT_TXICP) 1137 - { 1138 - writel(NS_STAT_TXICP, card->membase + STAT); 1139 - TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n", 1140 - card->index); 1141 - } 1142 - 1143 - /* Transmit Status Queue 7/8 full */ 1144 - if (stat_r & NS_STAT_TSQF) 1145 - { 1146 - writel(NS_STAT_TSQF, card->membase + STAT); 1147 - PRINTK("nicstar%d: TSQ full.\n", card->index); 1148 - process_tsq(card); 1149 - } 1150 - 1151 - /* Timer overflow */ 1152 - if (stat_r & NS_STAT_TMROF) 1153 - { 1154 - writel(NS_STAT_TMROF, card->membase + STAT); 1155 - PRINTK("nicstar%d: Timer overflow.\n", card->index); 1156 - } 1157 - 1158 - /* PHY device interrupt signal active */ 1159 - if (stat_r & NS_STAT_PHYI) 1160 - { 1161 - writel(NS_STAT_PHYI, card->membase + STAT); 1162 - PRINTK("nicstar%d: PHY interrupt.\n", card->index); 1163 - if (dev->phy && dev->phy->interrupt) { 1164 - dev->phy->interrupt(dev); 1165 - } 1166 - } 1063 + stat_r = readl(card->membase + STAT); 1167 1064 1168 - /* Small Buffer Queue is full */ 1169 - if (stat_r & NS_STAT_SFBQF) 1170 - { 1171 - writel(NS_STAT_SFBQF, card->membase + STAT); 1172 - printk("nicstar%d: Small free buffer queue is full.\n", card->index); 1173 - } 1174 - 1175 - /* Large Buffer Queue is full */ 1176 - if (stat_r & NS_STAT_LFBQF) 1177 - { 1178 - writel(NS_STAT_LFBQF, card->membase + STAT); 1179 - printk("nicstar%d: Large free buffer queue is full.\n", card->index); 1180 - } 1065 + /* Transmit Status Indicator has been written to T. S. 
Queue */ 1066 + if (stat_r & NS_STAT_TSIF) { 1067 + TXPRINTK("nicstar%d: TSI interrupt\n", card->index); 1068 + process_tsq(card); 1069 + writel(NS_STAT_TSIF, card->membase + STAT); 1070 + } 1181 1071 1182 - /* Receive Status Queue is full */ 1183 - if (stat_r & NS_STAT_RSQF) 1184 - { 1185 - writel(NS_STAT_RSQF, card->membase + STAT); 1186 - printk("nicstar%d: RSQ full.\n", card->index); 1187 - process_rsq(card); 1188 - } 1072 + /* Incomplete CS-PDU has been transmitted */ 1073 + if (stat_r & NS_STAT_TXICP) { 1074 + writel(NS_STAT_TXICP, card->membase + STAT); 1075 + TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n", 1076 + card->index); 1077 + } 1189 1078 1190 - /* Complete CS-PDU received */ 1191 - if (stat_r & NS_STAT_EOPDU) 1192 - { 1193 - RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index); 1194 - process_rsq(card); 1195 - writel(NS_STAT_EOPDU, card->membase + STAT); 1196 - } 1079 + /* Transmit Status Queue 7/8 full */ 1080 + if (stat_r & NS_STAT_TSQF) { 1081 + writel(NS_STAT_TSQF, card->membase + STAT); 1082 + PRINTK("nicstar%d: TSQ full.\n", card->index); 1083 + process_tsq(card); 1084 + } 1197 1085 1198 - /* Raw cell received */ 1199 - if (stat_r & NS_STAT_RAWCF) 1200 - { 1201 - writel(NS_STAT_RAWCF, card->membase + STAT); 1086 + /* Timer overflow */ 1087 + if (stat_r & NS_STAT_TMROF) { 1088 + writel(NS_STAT_TMROF, card->membase + STAT); 1089 + PRINTK("nicstar%d: Timer overflow.\n", card->index); 1090 + } 1091 + 1092 + /* PHY device interrupt signal active */ 1093 + if (stat_r & NS_STAT_PHYI) { 1094 + writel(NS_STAT_PHYI, card->membase + STAT); 1095 + PRINTK("nicstar%d: PHY interrupt.\n", card->index); 1096 + if (dev->phy && dev->phy->interrupt) { 1097 + dev->phy->interrupt(dev); 1098 + } 1099 + } 1100 + 1101 + /* Small Buffer Queue is full */ 1102 + if (stat_r & NS_STAT_SFBQF) { 1103 + writel(NS_STAT_SFBQF, card->membase + STAT); 1104 + printk("nicstar%d: Small free buffer queue is full.\n", 1105 + card->index); 1106 + } 1107 + 1108 + /* 
Large Buffer Queue is full */ 1109 + if (stat_r & NS_STAT_LFBQF) { 1110 + writel(NS_STAT_LFBQF, card->membase + STAT); 1111 + printk("nicstar%d: Large free buffer queue is full.\n", 1112 + card->index); 1113 + } 1114 + 1115 + /* Receive Status Queue is full */ 1116 + if (stat_r & NS_STAT_RSQF) { 1117 + writel(NS_STAT_RSQF, card->membase + STAT); 1118 + printk("nicstar%d: RSQ full.\n", card->index); 1119 + process_rsq(card); 1120 + } 1121 + 1122 + /* Complete CS-PDU received */ 1123 + if (stat_r & NS_STAT_EOPDU) { 1124 + RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index); 1125 + process_rsq(card); 1126 + writel(NS_STAT_EOPDU, card->membase + STAT); 1127 + } 1128 + 1129 + /* Raw cell received */ 1130 + if (stat_r & NS_STAT_RAWCF) { 1131 + writel(NS_STAT_RAWCF, card->membase + STAT); 1202 1132 #ifndef RCQ_SUPPORT 1203 - printk("nicstar%d: Raw cell received and no support yet...\n", 1204 - card->index); 1133 + printk("nicstar%d: Raw cell received and no support yet...\n", 1134 + card->index); 1205 1135 #endif /* RCQ_SUPPORT */ 1206 - /* NOTE: the following procedure may keep a raw cell pending until the 1207 - next interrupt. As this preliminary support is only meant to 1208 - avoid buffer leakage, this is not an issue. */ 1209 - while (readl(card->membase + RAWCT) != card->rawch) 1210 - { 1211 - ns_rcqe *rawcell; 1136 + /* NOTE: the following procedure may keep a raw cell pending until the 1137 + next interrupt. As this preliminary support is only meant to 1138 + avoid buffer leakage, this is not an issue. 
*/ 1139 + while (readl(card->membase + RAWCT) != card->rawch) { 1140 + ns_rcqe *rawcell; 1212 1141 1213 - rawcell = (ns_rcqe *) bus_to_virt(card->rawch); 1214 - if (ns_rcqe_islast(rawcell)) 1215 - { 1216 - struct sk_buff *oldbuf; 1142 + rawcell = (ns_rcqe *) bus_to_virt(card->rawch); 1143 + if (ns_rcqe_islast(rawcell)) { 1144 + struct sk_buff *oldbuf; 1217 1145 1218 - oldbuf = card->rcbuf; 1219 - card->rcbuf = (struct sk_buff *) ns_rcqe_nextbufhandle(rawcell); 1220 - card->rawch = (u32) virt_to_bus(card->rcbuf->data); 1221 - recycle_rx_buf(card, oldbuf); 1222 - } 1223 - else 1224 - card->rawch += NS_RCQE_SIZE; 1225 - } 1226 - } 1146 + oldbuf = card->rcbuf; 1147 + card->rcbuf = 1148 + (struct sk_buff *) 1149 + ns_rcqe_nextbufhandle(rawcell); 1150 + card->rawch = 1151 + (u32) virt_to_bus(card->rcbuf->data); 1152 + recycle_rx_buf(card, oldbuf); 1153 + } else 1154 + card->rawch += NS_RCQE_SIZE; 1155 + } 1156 + } 1227 1157 1228 - /* Small buffer queue is empty */ 1229 - if (stat_r & NS_STAT_SFBQE) 1230 - { 1231 - int i; 1232 - struct sk_buff *sb; 1158 + /* Small buffer queue is empty */ 1159 + if (stat_r & NS_STAT_SFBQE) { 1160 + int i; 1161 + struct sk_buff *sb; 1233 1162 1234 - writel(NS_STAT_SFBQE, card->membase + STAT); 1235 - printk("nicstar%d: Small free buffer queue empty.\n", 1236 - card->index); 1237 - for (i = 0; i < card->sbnr.min; i++) 1238 - { 1239 - sb = dev_alloc_skb(NS_SMSKBSIZE); 1240 - if (sb == NULL) 1241 - { 1242 - writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); 1243 - card->efbie = 0; 1244 - break; 1245 - } 1246 - NS_SKB_CB(sb)->buf_type = BUF_SM; 1247 - skb_queue_tail(&card->sbpool.queue, sb); 1248 - skb_reserve(sb, NS_AAL0_HEADER); 1249 - push_rxbufs(card, sb); 1250 - } 1251 - card->sbfqc = i; 1252 - process_rsq(card); 1253 - } 1163 + writel(NS_STAT_SFBQE, card->membase + STAT); 1164 + printk("nicstar%d: Small free buffer queue empty.\n", 1165 + card->index); 1166 + for (i = 0; i < card->sbnr.min; i++) { 1167 + sb = 
dev_alloc_skb(NS_SMSKBSIZE); 1168 + if (sb == NULL) { 1169 + writel(readl(card->membase + CFG) & 1170 + ~NS_CFG_EFBIE, card->membase + CFG); 1171 + card->efbie = 0; 1172 + break; 1173 + } 1174 + NS_SKB_CB(sb)->buf_type = BUF_SM; 1175 + skb_queue_tail(&card->sbpool.queue, sb); 1176 + skb_reserve(sb, NS_AAL0_HEADER); 1177 + push_rxbufs(card, sb); 1178 + } 1179 + card->sbfqc = i; 1180 + process_rsq(card); 1181 + } 1254 1182 1255 - /* Large buffer queue empty */ 1256 - if (stat_r & NS_STAT_LFBQE) 1257 - { 1258 - int i; 1259 - struct sk_buff *lb; 1183 + /* Large buffer queue empty */ 1184 + if (stat_r & NS_STAT_LFBQE) { 1185 + int i; 1186 + struct sk_buff *lb; 1260 1187 1261 - writel(NS_STAT_LFBQE, card->membase + STAT); 1262 - printk("nicstar%d: Large free buffer queue empty.\n", 1263 - card->index); 1264 - for (i = 0; i < card->lbnr.min; i++) 1265 - { 1266 - lb = dev_alloc_skb(NS_LGSKBSIZE); 1267 - if (lb == NULL) 1268 - { 1269 - writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); 1270 - card->efbie = 0; 1271 - break; 1272 - } 1273 - NS_SKB_CB(lb)->buf_type = BUF_LG; 1274 - skb_queue_tail(&card->lbpool.queue, lb); 1275 - skb_reserve(lb, NS_SMBUFSIZE); 1276 - push_rxbufs(card, lb); 1277 - } 1278 - card->lbfqc = i; 1279 - process_rsq(card); 1280 - } 1188 + writel(NS_STAT_LFBQE, card->membase + STAT); 1189 + printk("nicstar%d: Large free buffer queue empty.\n", 1190 + card->index); 1191 + for (i = 0; i < card->lbnr.min; i++) { 1192 + lb = dev_alloc_skb(NS_LGSKBSIZE); 1193 + if (lb == NULL) { 1194 + writel(readl(card->membase + CFG) & 1195 + ~NS_CFG_EFBIE, card->membase + CFG); 1196 + card->efbie = 0; 1197 + break; 1198 + } 1199 + NS_SKB_CB(lb)->buf_type = BUF_LG; 1200 + skb_queue_tail(&card->lbpool.queue, lb); 1201 + skb_reserve(lb, NS_SMBUFSIZE); 1202 + push_rxbufs(card, lb); 1203 + } 1204 + card->lbfqc = i; 1205 + process_rsq(card); 1206 + } 1281 1207 1282 - /* Receive Status Queue is 7/8 full */ 1283 - if (stat_r & NS_STAT_RSQAF) 1284 - { 1285 - 
writel(NS_STAT_RSQAF, card->membase + STAT); 1286 - RXPRINTK("nicstar%d: RSQ almost full.\n", card->index); 1287 - process_rsq(card); 1288 - } 1289 - 1290 - spin_unlock_irqrestore(&card->int_lock, flags); 1291 - PRINTK("nicstar%d: end of interrupt service\n", card->index); 1292 - return IRQ_HANDLED; 1208 + /* Receive Status Queue is 7/8 full */ 1209 + if (stat_r & NS_STAT_RSQAF) { 1210 + writel(NS_STAT_RSQAF, card->membase + STAT); 1211 + RXPRINTK("nicstar%d: RSQ almost full.\n", card->index); 1212 + process_rsq(card); 1213 + } 1214 + 1215 + spin_unlock_irqrestore(&card->int_lock, flags); 1216 + PRINTK("nicstar%d: end of interrupt service\n", card->index); 1217 + return IRQ_HANDLED; 1293 1218 } 1294 - 1295 - 1296 1219 1297 1220 static int ns_open(struct atm_vcc *vcc) 1298 1221 { 1299 - ns_dev *card; 1300 - vc_map *vc; 1301 - unsigned long tmpl, modl; 1302 - int tcr, tcra; /* target cell rate, and absolute value */ 1303 - int n = 0; /* Number of entries in the TST. Initialized to remove 1304 - the compiler warning. */ 1305 - u32 u32d[4]; 1306 - int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler 1307 - warning. How I wish compilers were clever enough to 1308 - tell which variables can truly be used 1309 - uninitialized... */ 1310 - int inuse; /* tx or rx vc already in use by another vcc */ 1311 - short vpi = vcc->vpi; 1312 - int vci = vcc->vci; 1222 + ns_dev *card; 1223 + vc_map *vc; 1224 + unsigned long tmpl, modl; 1225 + int tcr, tcra; /* target cell rate, and absolute value */ 1226 + int n = 0; /* Number of entries in the TST. Initialized to remove 1227 + the compiler warning. */ 1228 + u32 u32d[4]; 1229 + int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler 1230 + warning. How I wish compilers were clever enough to 1231 + tell which variables can truly be used 1232 + uninitialized... 
*/ 1233 + int inuse; /* tx or rx vc already in use by another vcc */ 1234 + short vpi = vcc->vpi; 1235 + int vci = vcc->vci; 1313 1236 1314 - card = (ns_dev *) vcc->dev->dev_data; 1315 - PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int) vpi, vci); 1316 - if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) 1317 - { 1318 - PRINTK("nicstar%d: unsupported AAL.\n", card->index); 1319 - return -EINVAL; 1320 - } 1237 + card = (ns_dev *) vcc->dev->dev_data; 1238 + PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi, 1239 + vci); 1240 + if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { 1241 + PRINTK("nicstar%d: unsupported AAL.\n", card->index); 1242 + return -EINVAL; 1243 + } 1321 1244 1322 - vc = &(card->vcmap[vpi << card->vcibits | vci]); 1323 - vcc->dev_data = vc; 1245 + vc = &(card->vcmap[vpi << card->vcibits | vci]); 1246 + vcc->dev_data = vc; 1324 1247 1325 - inuse = 0; 1326 - if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx) 1327 - inuse = 1; 1328 - if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx) 1329 - inuse += 2; 1330 - if (inuse) 1331 - { 1332 - printk("nicstar%d: %s vci already in use.\n", card->index, 1333 - inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx"); 1334 - return -EINVAL; 1335 - } 1248 + inuse = 0; 1249 + if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx) 1250 + inuse = 1; 1251 + if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx) 1252 + inuse += 2; 1253 + if (inuse) { 1254 + printk("nicstar%d: %s vci already in use.\n", card->index, 1255 + inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx"); 1256 + return -EINVAL; 1257 + } 1336 1258 1337 - set_bit(ATM_VF_ADDR,&vcc->flags); 1259 + set_bit(ATM_VF_ADDR, &vcc->flags); 1338 1260 1339 - /* NOTE: You are not allowed to modify an open connection's QOS. To change 1340 - that, remove the ATM_VF_PARTIAL flag checking. There may be other changes 1341 - needed to do that. 
*/ 1342 - if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) 1343 - { 1344 - scq_info *scq; 1345 - 1346 - set_bit(ATM_VF_PARTIAL,&vcc->flags); 1347 - if (vcc->qos.txtp.traffic_class == ATM_CBR) 1348 - { 1349 - /* Check requested cell rate and availability of SCD */ 1350 - if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 && 1351 - vcc->qos.txtp.min_pcr == 0) 1352 - { 1353 - PRINTK("nicstar%d: trying to open a CBR vc with cell rate = 0 \n", 1354 - card->index); 1355 - clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1356 - clear_bit(ATM_VF_ADDR,&vcc->flags); 1357 - return -EINVAL; 1358 - } 1261 + /* NOTE: You are not allowed to modify an open connection's QOS. To change 1262 + that, remove the ATM_VF_PARTIAL flag checking. There may be other changes 1263 + needed to do that. */ 1264 + if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) { 1265 + scq_info *scq; 1359 1266 1360 - tcr = atm_pcr_goal(&(vcc->qos.txtp)); 1361 - tcra = tcr >= 0 ? tcr : -tcr; 1362 - 1363 - PRINTK("nicstar%d: target cell rate = %d.\n", card->index, 1364 - vcc->qos.txtp.max_pcr); 1267 + set_bit(ATM_VF_PARTIAL, &vcc->flags); 1268 + if (vcc->qos.txtp.traffic_class == ATM_CBR) { 1269 + /* Check requested cell rate and availability of SCD */ 1270 + if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 1271 + && vcc->qos.txtp.min_pcr == 0) { 1272 + PRINTK 1273 + ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n", 1274 + card->index); 1275 + clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1276 + clear_bit(ATM_VF_ADDR, &vcc->flags); 1277 + return -EINVAL; 1278 + } 1365 1279 1366 - tmpl = (unsigned long)tcra * (unsigned long)NS_TST_NUM_ENTRIES; 1367 - modl = tmpl % card->max_pcr; 1280 + tcr = atm_pcr_goal(&(vcc->qos.txtp)); 1281 + tcra = tcr >= 0 ? 
tcr : -tcr; 1368 1282 1369 - n = (int)(tmpl / card->max_pcr); 1370 - if (tcr > 0) 1371 - { 1372 - if (modl > 0) n++; 1373 - } 1374 - else if (tcr == 0) 1375 - { 1376 - if ((n = (card->tst_free_entries - NS_TST_RESERVED)) <= 0) 1377 - { 1378 - PRINTK("nicstar%d: no CBR bandwidth free.\n", card->index); 1379 - clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1380 - clear_bit(ATM_VF_ADDR,&vcc->flags); 1381 - return -EINVAL; 1382 - } 1383 - } 1283 + PRINTK("nicstar%d: target cell rate = %d.\n", 1284 + card->index, vcc->qos.txtp.max_pcr); 1384 1285 1385 - if (n == 0) 1386 - { 1387 - printk("nicstar%d: selected bandwidth < granularity.\n", card->index); 1388 - clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1389 - clear_bit(ATM_VF_ADDR,&vcc->flags); 1390 - return -EINVAL; 1391 - } 1286 + tmpl = 1287 + (unsigned long)tcra *(unsigned long) 1288 + NS_TST_NUM_ENTRIES; 1289 + modl = tmpl % card->max_pcr; 1392 1290 1393 - if (n > (card->tst_free_entries - NS_TST_RESERVED)) 1394 - { 1395 - PRINTK("nicstar%d: not enough free CBR bandwidth.\n", card->index); 1396 - clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1397 - clear_bit(ATM_VF_ADDR,&vcc->flags); 1398 - return -EINVAL; 1399 - } 1400 - else 1401 - card->tst_free_entries -= n; 1291 + n = (int)(tmpl / card->max_pcr); 1292 + if (tcr > 0) { 1293 + if (modl > 0) 1294 + n++; 1295 + } else if (tcr == 0) { 1296 + if ((n = 1297 + (card->tst_free_entries - 1298 + NS_TST_RESERVED)) <= 0) { 1299 + PRINTK 1300 + ("nicstar%d: no CBR bandwidth free.\n", 1301 + card->index); 1302 + clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1303 + clear_bit(ATM_VF_ADDR, &vcc->flags); 1304 + return -EINVAL; 1305 + } 1306 + } 1402 1307 1403 - XPRINTK("nicstar%d: writing %d tst entries.\n", card->index, n); 1404 - for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) 1405 - { 1406 - if (card->scd2vc[frscdi] == NULL) 1407 - { 1408 - card->scd2vc[frscdi] = vc; 1409 - break; 1410 - } 1411 - } 1412 - if (frscdi == NS_FRSCD_NUM) 1413 - { 1414 - PRINTK("nicstar%d: no SCD available for CBR 
channel.\n", card->index); 1415 - card->tst_free_entries += n; 1416 - clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1417 - clear_bit(ATM_VF_ADDR,&vcc->flags); 1418 - return -EBUSY; 1419 - } 1308 + if (n == 0) { 1309 + printk 1310 + ("nicstar%d: selected bandwidth < granularity.\n", 1311 + card->index); 1312 + clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1313 + clear_bit(ATM_VF_ADDR, &vcc->flags); 1314 + return -EINVAL; 1315 + } 1420 1316 1421 - vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE; 1317 + if (n > (card->tst_free_entries - NS_TST_RESERVED)) { 1318 + PRINTK 1319 + ("nicstar%d: not enough free CBR bandwidth.\n", 1320 + card->index); 1321 + clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1322 + clear_bit(ATM_VF_ADDR, &vcc->flags); 1323 + return -EINVAL; 1324 + } else 1325 + card->tst_free_entries -= n; 1422 1326 1423 - scq = get_scq(CBR_SCQSIZE, vc->cbr_scd); 1424 - if (scq == NULL) 1425 - { 1426 - PRINTK("nicstar%d: can't get fixed rate SCQ.\n", card->index); 1427 - card->scd2vc[frscdi] = NULL; 1428 - card->tst_free_entries += n; 1429 - clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1430 - clear_bit(ATM_VF_ADDR,&vcc->flags); 1431 - return -ENOMEM; 1432 - } 1433 - vc->scq = scq; 1434 - u32d[0] = (u32) virt_to_bus(scq->base); 1435 - u32d[1] = (u32) 0x00000000; 1436 - u32d[2] = (u32) 0xffffffff; 1437 - u32d[3] = (u32) 0x00000000; 1438 - ns_write_sram(card, vc->cbr_scd, u32d, 4); 1439 - 1440 - fill_tst(card, n, vc); 1441 - } 1442 - else if (vcc->qos.txtp.traffic_class == ATM_UBR) 1443 - { 1444 - vc->cbr_scd = 0x00000000; 1445 - vc->scq = card->scq0; 1446 - } 1447 - 1448 - if (vcc->qos.txtp.traffic_class != ATM_NONE) 1449 - { 1450 - vc->tx = 1; 1451 - vc->tx_vcc = vcc; 1452 - vc->tbd_count = 0; 1453 - } 1454 - if (vcc->qos.rxtp.traffic_class != ATM_NONE) 1455 - { 1456 - u32 status; 1457 - 1458 - vc->rx = 1; 1459 - vc->rx_vcc = vcc; 1460 - vc->rx_iov = NULL; 1327 + XPRINTK("nicstar%d: writing %d tst entries.\n", 1328 + card->index, n); 1329 + for (frscdi = 0; frscdi < NS_FRSCD_NUM; 
frscdi++) { 1330 + if (card->scd2vc[frscdi] == NULL) { 1331 + card->scd2vc[frscdi] = vc; 1332 + break; 1333 + } 1334 + } 1335 + if (frscdi == NS_FRSCD_NUM) { 1336 + PRINTK 1337 + ("nicstar%d: no SCD available for CBR channel.\n", 1338 + card->index); 1339 + card->tst_free_entries += n; 1340 + clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1341 + clear_bit(ATM_VF_ADDR, &vcc->flags); 1342 + return -EBUSY; 1343 + } 1461 1344 1462 - /* Open the connection in hardware */ 1463 - if (vcc->qos.aal == ATM_AAL5) 1464 - status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN; 1465 - else /* vcc->qos.aal == ATM_AAL0 */ 1466 - status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN; 1345 + vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE; 1346 + 1347 + scq = get_scq(CBR_SCQSIZE, vc->cbr_scd); 1348 + if (scq == NULL) { 1349 + PRINTK("nicstar%d: can't get fixed rate SCQ.\n", 1350 + card->index); 1351 + card->scd2vc[frscdi] = NULL; 1352 + card->tst_free_entries += n; 1353 + clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1354 + clear_bit(ATM_VF_ADDR, &vcc->flags); 1355 + return -ENOMEM; 1356 + } 1357 + vc->scq = scq; 1358 + u32d[0] = (u32) virt_to_bus(scq->base); 1359 + u32d[1] = (u32) 0x00000000; 1360 + u32d[2] = (u32) 0xffffffff; 1361 + u32d[3] = (u32) 0x00000000; 1362 + ns_write_sram(card, vc->cbr_scd, u32d, 4); 1363 + 1364 + fill_tst(card, n, vc); 1365 + } else if (vcc->qos.txtp.traffic_class == ATM_UBR) { 1366 + vc->cbr_scd = 0x00000000; 1367 + vc->scq = card->scq0; 1368 + } 1369 + 1370 + if (vcc->qos.txtp.traffic_class != ATM_NONE) { 1371 + vc->tx = 1; 1372 + vc->tx_vcc = vcc; 1373 + vc->tbd_count = 0; 1374 + } 1375 + if (vcc->qos.rxtp.traffic_class != ATM_NONE) { 1376 + u32 status; 1377 + 1378 + vc->rx = 1; 1379 + vc->rx_vcc = vcc; 1380 + vc->rx_iov = NULL; 1381 + 1382 + /* Open the connection in hardware */ 1383 + if (vcc->qos.aal == ATM_AAL5) 1384 + status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN; 1385 + else /* vcc->qos.aal == ATM_AAL0 */ 1386 + status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN; 1467 1387 #ifdef 
RCQ_SUPPORT 1468 - status |= NS_RCTE_RAWCELLINTEN; 1388 + status |= NS_RCTE_RAWCELLINTEN; 1469 1389 #endif /* RCQ_SUPPORT */ 1470 - ns_write_sram(card, NS_RCT + (vpi << card->vcibits | vci) * 1471 - NS_RCT_ENTRY_SIZE, &status, 1); 1472 - } 1473 - 1474 - } 1475 - 1476 - set_bit(ATM_VF_READY,&vcc->flags); 1477 - return 0; 1390 + ns_write_sram(card, 1391 + NS_RCT + 1392 + (vpi << card->vcibits | vci) * 1393 + NS_RCT_ENTRY_SIZE, &status, 1); 1394 + } 1395 + 1396 + } 1397 + 1398 + set_bit(ATM_VF_READY, &vcc->flags); 1399 + return 0; 1478 1400 } 1479 - 1480 - 1481 1401 1482 1402 static void ns_close(struct atm_vcc *vcc) 1483 1403 { 1484 - vc_map *vc; 1485 - ns_dev *card; 1486 - u32 data; 1487 - int i; 1488 - 1489 - vc = vcc->dev_data; 1490 - card = vcc->dev->dev_data; 1491 - PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index, 1492 - (int) vcc->vpi, vcc->vci); 1404 + vc_map *vc; 1405 + ns_dev *card; 1406 + u32 data; 1407 + int i; 1493 1408 1494 - clear_bit(ATM_VF_READY,&vcc->flags); 1495 - 1496 - if (vcc->qos.rxtp.traffic_class != ATM_NONE) 1497 - { 1498 - u32 addr; 1499 - unsigned long flags; 1500 - 1501 - addr = NS_RCT + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE; 1502 - spin_lock_irqsave(&card->res_lock, flags); 1503 - while(CMD_BUSY(card)); 1504 - writel(NS_CMD_CLOSE_CONNECTION | addr << 2, card->membase + CMD); 1505 - spin_unlock_irqrestore(&card->res_lock, flags); 1506 - 1507 - vc->rx = 0; 1508 - if (vc->rx_iov != NULL) 1509 - { 1510 - struct sk_buff *iovb; 1511 - u32 stat; 1512 - 1513 - stat = readl(card->membase + STAT); 1514 - card->sbfqc = ns_stat_sfbqc_get(stat); 1515 - card->lbfqc = ns_stat_lfbqc_get(stat); 1409 + vc = vcc->dev_data; 1410 + card = vcc->dev->dev_data; 1411 + PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index, 1412 + (int)vcc->vpi, vcc->vci); 1516 1413 1517 - PRINTK("nicstar%d: closing a VC with pending rx buffers.\n", 1518 - card->index); 1519 - iovb = vc->rx_iov; 1520 - recycle_iovec_rx_bufs(card, (struct iovec *) 
iovb->data, 1521 - NS_SKB(iovb)->iovcnt); 1522 - NS_SKB(iovb)->iovcnt = 0; 1523 - NS_SKB(iovb)->vcc = NULL; 1524 - spin_lock_irqsave(&card->int_lock, flags); 1525 - recycle_iov_buf(card, iovb); 1526 - spin_unlock_irqrestore(&card->int_lock, flags); 1527 - vc->rx_iov = NULL; 1528 - } 1529 - } 1414 + clear_bit(ATM_VF_READY, &vcc->flags); 1530 1415 1531 - if (vcc->qos.txtp.traffic_class != ATM_NONE) 1532 - { 1533 - vc->tx = 0; 1534 - } 1416 + if (vcc->qos.rxtp.traffic_class != ATM_NONE) { 1417 + u32 addr; 1418 + unsigned long flags; 1535 1419 1536 - if (vcc->qos.txtp.traffic_class == ATM_CBR) 1537 - { 1538 - unsigned long flags; 1539 - ns_scqe *scqep; 1540 - scq_info *scq; 1420 + addr = 1421 + NS_RCT + 1422 + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE; 1423 + spin_lock_irqsave(&card->res_lock, flags); 1424 + while (CMD_BUSY(card)) ; 1425 + writel(NS_CMD_CLOSE_CONNECTION | addr << 2, 1426 + card->membase + CMD); 1427 + spin_unlock_irqrestore(&card->res_lock, flags); 1541 1428 1542 - scq = vc->scq; 1429 + vc->rx = 0; 1430 + if (vc->rx_iov != NULL) { 1431 + struct sk_buff *iovb; 1432 + u32 stat; 1543 1433 1544 - for (;;) 1545 - { 1546 - spin_lock_irqsave(&scq->lock, flags); 1547 - scqep = scq->next; 1548 - if (scqep == scq->base) 1549 - scqep = scq->last; 1550 - else 1551 - scqep--; 1552 - if (scqep == scq->tail) 1553 - { 1554 - spin_unlock_irqrestore(&scq->lock, flags); 1555 - break; 1556 - } 1557 - /* If the last entry is not a TSR, place one in the SCQ in order to 1558 - be able to completely drain it and then close. 
*/ 1559 - if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) 1560 - { 1561 - ns_scqe tsr; 1562 - u32 scdi, scqi; 1563 - u32 data; 1564 - int index; 1434 + stat = readl(card->membase + STAT); 1435 + card->sbfqc = ns_stat_sfbqc_get(stat); 1436 + card->lbfqc = ns_stat_lfbqc_get(stat); 1565 1437 1566 - tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); 1567 - scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; 1568 - scqi = scq->next - scq->base; 1569 - tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); 1570 - tsr.word_3 = 0x00000000; 1571 - tsr.word_4 = 0x00000000; 1572 - *scq->next = tsr; 1573 - index = (int) scqi; 1574 - scq->skb[index] = NULL; 1575 - if (scq->next == scq->last) 1576 - scq->next = scq->base; 1577 - else 1578 - scq->next++; 1579 - data = (u32) virt_to_bus(scq->next); 1580 - ns_write_sram(card, scq->scd, &data, 1); 1581 - } 1582 - spin_unlock_irqrestore(&scq->lock, flags); 1583 - schedule(); 1584 - } 1438 + PRINTK 1439 + ("nicstar%d: closing a VC with pending rx buffers.\n", 1440 + card->index); 1441 + iovb = vc->rx_iov; 1442 + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, 1443 + NS_SKB(iovb)->iovcnt); 1444 + NS_SKB(iovb)->iovcnt = 0; 1445 + NS_SKB(iovb)->vcc = NULL; 1446 + spin_lock_irqsave(&card->int_lock, flags); 1447 + recycle_iov_buf(card, iovb); 1448 + spin_unlock_irqrestore(&card->int_lock, flags); 1449 + vc->rx_iov = NULL; 1450 + } 1451 + } 1585 1452 1586 - /* Free all TST entries */ 1587 - data = NS_TST_OPCODE_VARIABLE; 1588 - for (i = 0; i < NS_TST_NUM_ENTRIES; i++) 1589 - { 1590 - if (card->tste2vc[i] == vc) 1591 - { 1592 - ns_write_sram(card, card->tst_addr + i, &data, 1); 1593 - card->tste2vc[i] = NULL; 1594 - card->tst_free_entries++; 1595 - } 1596 - } 1597 - 1598 - card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; 1599 - free_scq(vc->scq, vcc); 1600 - } 1453 + if (vcc->qos.txtp.traffic_class != ATM_NONE) { 1454 + vc->tx = 0; 1455 + } 1601 1456 1602 - /* remove all references to vcc before deleting it */ 1603 - if 
(vcc->qos.txtp.traffic_class != ATM_NONE) 1604 - { 1605 - unsigned long flags; 1606 - scq_info *scq = card->scq0; 1457 + if (vcc->qos.txtp.traffic_class == ATM_CBR) { 1458 + unsigned long flags; 1459 + ns_scqe *scqep; 1460 + scq_info *scq; 1607 1461 1608 - spin_lock_irqsave(&scq->lock, flags); 1462 + scq = vc->scq; 1609 1463 1610 - for(i = 0; i < scq->num_entries; i++) { 1611 - if(scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) { 1612 - ATM_SKB(scq->skb[i])->vcc = NULL; 1613 - atm_return(vcc, scq->skb[i]->truesize); 1614 - PRINTK("nicstar: deleted pending vcc mapping\n"); 1615 - } 1616 - } 1464 + for (;;) { 1465 + spin_lock_irqsave(&scq->lock, flags); 1466 + scqep = scq->next; 1467 + if (scqep == scq->base) 1468 + scqep = scq->last; 1469 + else 1470 + scqep--; 1471 + if (scqep == scq->tail) { 1472 + spin_unlock_irqrestore(&scq->lock, flags); 1473 + break; 1474 + } 1475 + /* If the last entry is not a TSR, place one in the SCQ in order to 1476 + be able to completely drain it and then close. 
*/ 1477 + if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) { 1478 + ns_scqe tsr; 1479 + u32 scdi, scqi; 1480 + u32 data; 1481 + int index; 1617 1482 1618 - spin_unlock_irqrestore(&scq->lock, flags); 1619 - } 1483 + tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); 1484 + scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; 1485 + scqi = scq->next - scq->base; 1486 + tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); 1487 + tsr.word_3 = 0x00000000; 1488 + tsr.word_4 = 0x00000000; 1489 + *scq->next = tsr; 1490 + index = (int)scqi; 1491 + scq->skb[index] = NULL; 1492 + if (scq->next == scq->last) 1493 + scq->next = scq->base; 1494 + else 1495 + scq->next++; 1496 + data = (u32) virt_to_bus(scq->next); 1497 + ns_write_sram(card, scq->scd, &data, 1); 1498 + } 1499 + spin_unlock_irqrestore(&scq->lock, flags); 1500 + schedule(); 1501 + } 1620 1502 1621 - vcc->dev_data = NULL; 1622 - clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1623 - clear_bit(ATM_VF_ADDR,&vcc->flags); 1503 + /* Free all TST entries */ 1504 + data = NS_TST_OPCODE_VARIABLE; 1505 + for (i = 0; i < NS_TST_NUM_ENTRIES; i++) { 1506 + if (card->tste2vc[i] == vc) { 1507 + ns_write_sram(card, card->tst_addr + i, &data, 1508 + 1); 1509 + card->tste2vc[i] = NULL; 1510 + card->tst_free_entries++; 1511 + } 1512 + } 1513 + 1514 + card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; 1515 + free_scq(vc->scq, vcc); 1516 + } 1517 + 1518 + /* remove all references to vcc before deleting it */ 1519 + if (vcc->qos.txtp.traffic_class != ATM_NONE) { 1520 + unsigned long flags; 1521 + scq_info *scq = card->scq0; 1522 + 1523 + spin_lock_irqsave(&scq->lock, flags); 1524 + 1525 + for (i = 0; i < scq->num_entries; i++) { 1526 + if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) { 1527 + ATM_SKB(scq->skb[i])->vcc = NULL; 1528 + atm_return(vcc, scq->skb[i]->truesize); 1529 + PRINTK 1530 + ("nicstar: deleted pending vcc mapping\n"); 1531 + } 1532 + } 1533 + 1534 + spin_unlock_irqrestore(&scq->lock, flags); 1535 + } 1536 + 1537 + 
vcc->dev_data = NULL; 1538 + clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1539 + clear_bit(ATM_VF_ADDR, &vcc->flags); 1624 1540 1625 1541 #ifdef RX_DEBUG 1626 - { 1627 - u32 stat, cfg; 1628 - stat = readl(card->membase + STAT); 1629 - cfg = readl(card->membase + CFG); 1630 - printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg); 1631 - printk("TSQ: base = 0x%08X next = 0x%08X last = 0x%08X TSQT = 0x%08X \n", 1632 - (u32) card->tsq.base, (u32) card->tsq.next,(u32) card->tsq.last, 1633 - readl(card->membase + TSQT)); 1634 - printk("RSQ: base = 0x%08X next = 0x%08X last = 0x%08X RSQT = 0x%08X \n", 1635 - (u32) card->rsq.base, (u32) card->rsq.next,(u32) card->rsq.last, 1636 - readl(card->membase + RSQT)); 1637 - printk("Empty free buffer queue interrupt %s \n", 1638 - card->efbie ? "enabled" : "disabled"); 1639 - printk("SBCNT = %d count = %d LBCNT = %d count = %d \n", 1640 - ns_stat_sfbqc_get(stat), card->sbpool.count, 1641 - ns_stat_lfbqc_get(stat), card->lbpool.count); 1642 - printk("hbpool.count = %d iovpool.count = %d \n", 1643 - card->hbpool.count, card->iovpool.count); 1644 - } 1542 + { 1543 + u32 stat, cfg; 1544 + stat = readl(card->membase + STAT); 1545 + cfg = readl(card->membase + CFG); 1546 + printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg); 1547 + printk 1548 + ("TSQ: base = 0x%08X next = 0x%08X last = 0x%08X TSQT = 0x%08X \n", 1549 + (u32) card->tsq.base, (u32) card->tsq.next, 1550 + (u32) card->tsq.last, readl(card->membase + TSQT)); 1551 + printk 1552 + ("RSQ: base = 0x%08X next = 0x%08X last = 0x%08X RSQT = 0x%08X \n", 1553 + (u32) card->rsq.base, (u32) card->rsq.next, 1554 + (u32) card->rsq.last, readl(card->membase + RSQT)); 1555 + printk("Empty free buffer queue interrupt %s \n", 1556 + card->efbie ? 
"enabled" : "disabled"); 1557 + printk("SBCNT = %d count = %d LBCNT = %d count = %d \n", 1558 + ns_stat_sfbqc_get(stat), card->sbpool.count, 1559 + ns_stat_lfbqc_get(stat), card->lbpool.count); 1560 + printk("hbpool.count = %d iovpool.count = %d \n", 1561 + card->hbpool.count, card->iovpool.count); 1562 + } 1645 1563 #endif /* RX_DEBUG */ 1646 1564 } 1647 1565 1648 - 1649 - 1650 - static void fill_tst(ns_dev *card, int n, vc_map *vc) 1566 + static void fill_tst(ns_dev * card, int n, vc_map * vc) 1651 1567 { 1652 - u32 new_tst; 1653 - unsigned long cl; 1654 - int e, r; 1655 - u32 data; 1656 - 1657 - /* It would be very complicated to keep the two TSTs synchronized while 1658 - assuring that writes are only made to the inactive TST. So, for now I 1659 - will use only one TST. If problems occur, I will change this again */ 1660 - 1661 - new_tst = card->tst_addr; 1568 + u32 new_tst; 1569 + unsigned long cl; 1570 + int e, r; 1571 + u32 data; 1662 1572 1663 - /* Fill procedure */ 1573 + /* It would be very complicated to keep the two TSTs synchronized while 1574 + assuring that writes are only made to the inactive TST. So, for now I 1575 + will use only one TST. If problems occur, I will change this again */ 1664 1576 1665 - for (e = 0; e < NS_TST_NUM_ENTRIES; e++) 1666 - { 1667 - if (card->tste2vc[e] == NULL) 1668 - break; 1669 - } 1670 - if (e == NS_TST_NUM_ENTRIES) { 1671 - printk("nicstar%d: No free TST entries found. 
\n", card->index); 1672 - return; 1673 - } 1577 + new_tst = card->tst_addr; 1674 1578 1675 - r = n; 1676 - cl = NS_TST_NUM_ENTRIES; 1677 - data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd); 1678 - 1679 - while (r > 0) 1680 - { 1681 - if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) 1682 - { 1683 - card->tste2vc[e] = vc; 1684 - ns_write_sram(card, new_tst + e, &data, 1); 1685 - cl -= NS_TST_NUM_ENTRIES; 1686 - r--; 1687 - } 1579 + /* Fill procedure */ 1688 1580 1689 - if (++e == NS_TST_NUM_ENTRIES) { 1690 - e = 0; 1691 - } 1692 - cl += n; 1693 - } 1694 - 1695 - /* End of fill procedure */ 1696 - 1697 - data = ns_tste_make(NS_TST_OPCODE_END, new_tst); 1698 - ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1); 1699 - ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); 1700 - card->tst_addr = new_tst; 1581 + for (e = 0; e < NS_TST_NUM_ENTRIES; e++) { 1582 + if (card->tste2vc[e] == NULL) 1583 + break; 1584 + } 1585 + if (e == NS_TST_NUM_ENTRIES) { 1586 + printk("nicstar%d: No free TST entries found. 
\n", card->index); 1587 + return; 1588 + } 1589 + 1590 + r = n; 1591 + cl = NS_TST_NUM_ENTRIES; 1592 + data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd); 1593 + 1594 + while (r > 0) { 1595 + if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) { 1596 + card->tste2vc[e] = vc; 1597 + ns_write_sram(card, new_tst + e, &data, 1); 1598 + cl -= NS_TST_NUM_ENTRIES; 1599 + r--; 1600 + } 1601 + 1602 + if (++e == NS_TST_NUM_ENTRIES) { 1603 + e = 0; 1604 + } 1605 + cl += n; 1606 + } 1607 + 1608 + /* End of fill procedure */ 1609 + 1610 + data = ns_tste_make(NS_TST_OPCODE_END, new_tst); 1611 + ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1); 1612 + ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); 1613 + card->tst_addr = new_tst; 1701 1614 } 1702 - 1703 - 1704 1615 1705 1616 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) 1706 1617 { 1707 - ns_dev *card; 1708 - vc_map *vc; 1709 - scq_info *scq; 1710 - unsigned long buflen; 1711 - ns_scqe scqe; 1712 - u32 flags; /* TBD flags, not CPU flags */ 1713 - 1714 - card = vcc->dev->dev_data; 1715 - TXPRINTK("nicstar%d: ns_send() called.\n", card->index); 1716 - if ((vc = (vc_map *) vcc->dev_data) == NULL) 1717 - { 1718 - printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index); 1719 - atomic_inc(&vcc->stats->tx_err); 1720 - dev_kfree_skb_any(skb); 1721 - return -EINVAL; 1722 - } 1723 - 1724 - if (!vc->tx) 1725 - { 1726 - printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index); 1727 - atomic_inc(&vcc->stats->tx_err); 1728 - dev_kfree_skb_any(skb); 1729 - return -EINVAL; 1730 - } 1731 - 1732 - if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) 1733 - { 1734 - printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index); 1735 - atomic_inc(&vcc->stats->tx_err); 1736 - dev_kfree_skb_any(skb); 1737 - return -EINVAL; 1738 - } 1739 - 1740 - if (skb_shinfo(skb)->nr_frags != 0) 1741 - { 1742 - printk("nicstar%d: No scatter-gather yet.\n", 
card->index); 1743 - atomic_inc(&vcc->stats->tx_err); 1744 - dev_kfree_skb_any(skb); 1745 - return -EINVAL; 1746 - } 1747 - 1748 - ATM_SKB(skb)->vcc = vcc; 1618 + ns_dev *card; 1619 + vc_map *vc; 1620 + scq_info *scq; 1621 + unsigned long buflen; 1622 + ns_scqe scqe; 1623 + u32 flags; /* TBD flags, not CPU flags */ 1749 1624 1750 - if (vcc->qos.aal == ATM_AAL5) 1751 - { 1752 - buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */ 1753 - flags = NS_TBD_AAL5; 1754 - scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data)); 1755 - scqe.word_3 = cpu_to_le32((u32) skb->len); 1756 - scqe.word_4 = ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0, 1757 - ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 1 : 0); 1758 - flags |= NS_TBD_EOPDU; 1759 - } 1760 - else /* (vcc->qos.aal == ATM_AAL0) */ 1761 - { 1762 - buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */ 1763 - flags = NS_TBD_AAL0; 1764 - scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data) + NS_AAL0_HEADER); 1765 - scqe.word_3 = cpu_to_le32(0x00000000); 1766 - if (*skb->data & 0x02) /* Payload type 1 - end of pdu */ 1767 - flags |= NS_TBD_EOPDU; 1768 - scqe.word_4 = cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK); 1769 - /* Force the VPI/VCI to be the same as in VCC struct */ 1770 - scqe.word_4 |= cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT | 1771 - ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) & 1772 - NS_TBD_VC_MASK); 1773 - } 1625 + card = vcc->dev->dev_data; 1626 + TXPRINTK("nicstar%d: ns_send() called.\n", card->index); 1627 + if ((vc = (vc_map *) vcc->dev_data) == NULL) { 1628 + printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", 1629 + card->index); 1630 + atomic_inc(&vcc->stats->tx_err); 1631 + dev_kfree_skb_any(skb); 1632 + return -EINVAL; 1633 + } 1774 1634 1775 - if (vcc->qos.txtp.traffic_class == ATM_CBR) 1776 - { 1777 - scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen); 1778 - scq = ((vc_map *) vcc->dev_data)->scq; 1779 - } 1780 - else 1781 - { 1782 - scqe.word_1 = 
ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen); 1783 - scq = card->scq0; 1784 - } 1635 + if (!vc->tx) { 1636 + printk("nicstar%d: Trying to transmit on a non-tx VC.\n", 1637 + card->index); 1638 + atomic_inc(&vcc->stats->tx_err); 1639 + dev_kfree_skb_any(skb); 1640 + return -EINVAL; 1641 + } 1785 1642 1786 - if (push_scqe(card, vc, scq, &scqe, skb) != 0) 1787 - { 1788 - atomic_inc(&vcc->stats->tx_err); 1789 - dev_kfree_skb_any(skb); 1790 - return -EIO; 1791 - } 1792 - atomic_inc(&vcc->stats->tx); 1643 + if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { 1644 + printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", 1645 + card->index); 1646 + atomic_inc(&vcc->stats->tx_err); 1647 + dev_kfree_skb_any(skb); 1648 + return -EINVAL; 1649 + } 1793 1650 1794 - return 0; 1651 + if (skb_shinfo(skb)->nr_frags != 0) { 1652 + printk("nicstar%d: No scatter-gather yet.\n", card->index); 1653 + atomic_inc(&vcc->stats->tx_err); 1654 + dev_kfree_skb_any(skb); 1655 + return -EINVAL; 1656 + } 1657 + 1658 + ATM_SKB(skb)->vcc = vcc; 1659 + 1660 + if (vcc->qos.aal == ATM_AAL5) { 1661 + buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */ 1662 + flags = NS_TBD_AAL5; 1663 + scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data)); 1664 + scqe.word_3 = cpu_to_le32((u32) skb->len); 1665 + scqe.word_4 = 1666 + ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0, 1667 + ATM_SKB(skb)-> 1668 + atm_options & ATM_ATMOPT_CLP ? 
1 : 0); 1669 + flags |= NS_TBD_EOPDU; 1670 + } else { /* (vcc->qos.aal == ATM_AAL0) */ 1671 + 1672 + buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */ 1673 + flags = NS_TBD_AAL0; 1674 + scqe.word_2 = 1675 + cpu_to_le32((u32) virt_to_bus(skb->data) + NS_AAL0_HEADER); 1676 + scqe.word_3 = cpu_to_le32(0x00000000); 1677 + if (*skb->data & 0x02) /* Payload type 1 - end of pdu */ 1678 + flags |= NS_TBD_EOPDU; 1679 + scqe.word_4 = 1680 + cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK); 1681 + /* Force the VPI/VCI to be the same as in VCC struct */ 1682 + scqe.word_4 |= 1683 + cpu_to_le32((((u32) vcc-> 1684 + vpi) << NS_TBD_VPI_SHIFT | ((u32) vcc-> 1685 + vci) << 1686 + NS_TBD_VCI_SHIFT) & NS_TBD_VC_MASK); 1687 + } 1688 + 1689 + if (vcc->qos.txtp.traffic_class == ATM_CBR) { 1690 + scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen); 1691 + scq = ((vc_map *) vcc->dev_data)->scq; 1692 + } else { 1693 + scqe.word_1 = 1694 + ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen); 1695 + scq = card->scq0; 1696 + } 1697 + 1698 + if (push_scqe(card, vc, scq, &scqe, skb) != 0) { 1699 + atomic_inc(&vcc->stats->tx_err); 1700 + dev_kfree_skb_any(skb); 1701 + return -EIO; 1702 + } 1703 + atomic_inc(&vcc->stats->tx); 1704 + 1705 + return 0; 1795 1706 } 1796 1707 1797 - 1798 - 1799 - static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd, 1800 - struct sk_buff *skb) 1708 + static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, 1709 + struct sk_buff *skb) 1801 1710 { 1802 - unsigned long flags; 1803 - ns_scqe tsr; 1804 - u32 scdi, scqi; 1805 - int scq_is_vbr; 1806 - u32 data; 1807 - int index; 1808 - 1809 - spin_lock_irqsave(&scq->lock, flags); 1810 - while (scq->tail == scq->next) 1811 - { 1812 - if (in_interrupt()) { 1813 - spin_unlock_irqrestore(&scq->lock, flags); 1814 - printk("nicstar%d: Error pushing TBD.\n", card->index); 1815 - return 1; 1816 - } 1711 + unsigned long flags; 1712 + ns_scqe tsr; 1713 + u32 scdi, scqi; 1714 + int 
scq_is_vbr; 1715 + u32 data; 1716 + int index; 1817 1717 1818 - scq->full = 1; 1819 - spin_unlock_irqrestore(&scq->lock, flags); 1820 - interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT); 1821 - spin_lock_irqsave(&scq->lock, flags); 1718 + spin_lock_irqsave(&scq->lock, flags); 1719 + while (scq->tail == scq->next) { 1720 + if (in_interrupt()) { 1721 + spin_unlock_irqrestore(&scq->lock, flags); 1722 + printk("nicstar%d: Error pushing TBD.\n", card->index); 1723 + return 1; 1724 + } 1822 1725 1823 - if (scq->full) { 1824 - spin_unlock_irqrestore(&scq->lock, flags); 1825 - printk("nicstar%d: Timeout pushing TBD.\n", card->index); 1826 - return 1; 1827 - } 1828 - } 1829 - *scq->next = *tbd; 1830 - index = (int) (scq->next - scq->base); 1831 - scq->skb[index] = skb; 1832 - XPRINTK("nicstar%d: sending skb at 0x%x (pos %d).\n", 1833 - card->index, (u32) skb, index); 1834 - XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n", 1835 - card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), 1836 - le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), 1837 - (u32) scq->next); 1838 - if (scq->next == scq->last) 1839 - scq->next = scq->base; 1840 - else 1841 - scq->next++; 1726 + scq->full = 1; 1727 + spin_unlock_irqrestore(&scq->lock, flags); 1728 + interruptible_sleep_on_timeout(&scq->scqfull_waitq, 1729 + SCQFULL_TIMEOUT); 1730 + spin_lock_irqsave(&scq->lock, flags); 1842 1731 1843 - vc->tbd_count++; 1844 - if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) 1845 - { 1846 - scq->tbd_count++; 1847 - scq_is_vbr = 1; 1848 - } 1849 - else 1850 - scq_is_vbr = 0; 1732 + if (scq->full) { 1733 + spin_unlock_irqrestore(&scq->lock, flags); 1734 + printk("nicstar%d: Timeout pushing TBD.\n", 1735 + card->index); 1736 + return 1; 1737 + } 1738 + } 1739 + *scq->next = *tbd; 1740 + index = (int)(scq->next - scq->base); 1741 + scq->skb[index] = skb; 1742 + XPRINTK("nicstar%d: sending skb at 0x%x (pos %d).\n", 1743 + card->index, (u32) skb, index); 1744 + 
XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n", 1745 + card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), 1746 + le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), 1747 + (u32) scq->next); 1748 + if (scq->next == scq->last) 1749 + scq->next = scq->base; 1750 + else 1751 + scq->next++; 1851 1752 1852 - if (vc->tbd_count >= MAX_TBD_PER_VC || scq->tbd_count >= MAX_TBD_PER_SCQ) 1853 - { 1854 - int has_run = 0; 1753 + vc->tbd_count++; 1754 + if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) { 1755 + scq->tbd_count++; 1756 + scq_is_vbr = 1; 1757 + } else 1758 + scq_is_vbr = 0; 1855 1759 1856 - while (scq->tail == scq->next) 1857 - { 1858 - if (in_interrupt()) { 1859 - data = (u32) virt_to_bus(scq->next); 1860 - ns_write_sram(card, scq->scd, &data, 1); 1861 - spin_unlock_irqrestore(&scq->lock, flags); 1862 - printk("nicstar%d: Error pushing TSR.\n", card->index); 1863 - return 0; 1864 - } 1760 + if (vc->tbd_count >= MAX_TBD_PER_VC 1761 + || scq->tbd_count >= MAX_TBD_PER_SCQ) { 1762 + int has_run = 0; 1865 1763 1866 - scq->full = 1; 1867 - if (has_run++) break; 1868 - spin_unlock_irqrestore(&scq->lock, flags); 1869 - interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT); 1870 - spin_lock_irqsave(&scq->lock, flags); 1871 - } 1764 + while (scq->tail == scq->next) { 1765 + if (in_interrupt()) { 1766 + data = (u32) virt_to_bus(scq->next); 1767 + ns_write_sram(card, scq->scd, &data, 1); 1768 + spin_unlock_irqrestore(&scq->lock, flags); 1769 + printk("nicstar%d: Error pushing TSR.\n", 1770 + card->index); 1771 + return 0; 1772 + } 1872 1773 1873 - if (!scq->full) 1874 - { 1875 - tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); 1876 - if (scq_is_vbr) 1877 - scdi = NS_TSR_SCDISVBR; 1878 - else 1879 - scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; 1880 - scqi = scq->next - scq->base; 1881 - tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); 1882 - tsr.word_3 = 0x00000000; 1883 - tsr.word_4 = 0x00000000; 1774 + scq->full = 1; 1775 + if 
(has_run++) 1776 + break; 1777 + spin_unlock_irqrestore(&scq->lock, flags); 1778 + interruptible_sleep_on_timeout(&scq->scqfull_waitq, 1779 + SCQFULL_TIMEOUT); 1780 + spin_lock_irqsave(&scq->lock, flags); 1781 + } 1884 1782 1885 - *scq->next = tsr; 1886 - index = (int) scqi; 1887 - scq->skb[index] = NULL; 1888 - XPRINTK("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n", 1889 - card->index, le32_to_cpu(tsr.word_1), le32_to_cpu(tsr.word_2), 1890 - le32_to_cpu(tsr.word_3), le32_to_cpu(tsr.word_4), 1891 - (u32) scq->next); 1892 - if (scq->next == scq->last) 1893 - scq->next = scq->base; 1894 - else 1895 - scq->next++; 1896 - vc->tbd_count = 0; 1897 - scq->tbd_count = 0; 1898 - } 1899 - else 1900 - PRINTK("nicstar%d: Timeout pushing TSR.\n", card->index); 1901 - } 1902 - data = (u32) virt_to_bus(scq->next); 1903 - ns_write_sram(card, scq->scd, &data, 1); 1904 - 1905 - spin_unlock_irqrestore(&scq->lock, flags); 1906 - 1907 - return 0; 1783 + if (!scq->full) { 1784 + tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); 1785 + if (scq_is_vbr) 1786 + scdi = NS_TSR_SCDISVBR; 1787 + else 1788 + scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; 1789 + scqi = scq->next - scq->base; 1790 + tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); 1791 + tsr.word_3 = 0x00000000; 1792 + tsr.word_4 = 0x00000000; 1793 + 1794 + *scq->next = tsr; 1795 + index = (int)scqi; 1796 + scq->skb[index] = NULL; 1797 + XPRINTK 1798 + ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n", 1799 + card->index, le32_to_cpu(tsr.word_1), 1800 + le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3), 1801 + le32_to_cpu(tsr.word_4), (u32) scq->next); 1802 + if (scq->next == scq->last) 1803 + scq->next = scq->base; 1804 + else 1805 + scq->next++; 1806 + vc->tbd_count = 0; 1807 + scq->tbd_count = 0; 1808 + } else 1809 + PRINTK("nicstar%d: Timeout pushing TSR.\n", 1810 + card->index); 1811 + } 1812 + data = (u32) virt_to_bus(scq->next); 1813 + ns_write_sram(card, scq->scd, &data, 1); 1814 + 1815 + 
spin_unlock_irqrestore(&scq->lock, flags); 1816 + 1817 + return 0; 1908 1818 } 1909 1819 1910 - 1911 - 1912 - static void process_tsq(ns_dev *card) 1820 + static void process_tsq(ns_dev * card) 1913 1821 { 1914 - u32 scdi; 1915 - scq_info *scq; 1916 - ns_tsi *previous = NULL, *one_ahead, *two_ahead; 1917 - int serviced_entries; /* flag indicating at least on entry was serviced */ 1918 - 1919 - serviced_entries = 0; 1920 - 1921 - if (card->tsq.next == card->tsq.last) 1922 - one_ahead = card->tsq.base; 1923 - else 1924 - one_ahead = card->tsq.next + 1; 1822 + u32 scdi; 1823 + scq_info *scq; 1824 + ns_tsi *previous = NULL, *one_ahead, *two_ahead; 1825 + int serviced_entries; /* flag indicating at least on entry was serviced */ 1925 1826 1926 - if (one_ahead == card->tsq.last) 1927 - two_ahead = card->tsq.base; 1928 - else 1929 - two_ahead = one_ahead + 1; 1930 - 1931 - while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || 1932 - !ns_tsi_isempty(two_ahead)) 1933 - /* At most two empty, as stated in the 77201 errata */ 1934 - { 1935 - serviced_entries = 1; 1936 - 1937 - /* Skip the one or two possible empty entries */ 1938 - while (ns_tsi_isempty(card->tsq.next)) { 1939 - if (card->tsq.next == card->tsq.last) 1940 - card->tsq.next = card->tsq.base; 1941 - else 1942 - card->tsq.next++; 1943 - } 1944 - 1945 - if (!ns_tsi_tmrof(card->tsq.next)) 1946 - { 1947 - scdi = ns_tsi_getscdindex(card->tsq.next); 1948 - if (scdi == NS_TSI_SCDISVBR) 1949 - scq = card->scq0; 1950 - else 1951 - { 1952 - if (card->scd2vc[scdi] == NULL) 1953 - { 1954 - printk("nicstar%d: could not find VC from SCD index.\n", 1955 - card->index); 1956 - ns_tsi_init(card->tsq.next); 1957 - return; 1958 - } 1959 - scq = card->scd2vc[scdi]->scq; 1960 - } 1961 - drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); 1962 - scq->full = 0; 1963 - wake_up_interruptible(&(scq->scqfull_waitq)); 1964 - } 1827 + serviced_entries = 0; 1965 1828 1966 - ns_tsi_init(card->tsq.next); 1967 - previous = 
card->tsq.next; 1968 - if (card->tsq.next == card->tsq.last) 1969 - card->tsq.next = card->tsq.base; 1970 - else 1971 - card->tsq.next++; 1829 + if (card->tsq.next == card->tsq.last) 1830 + one_ahead = card->tsq.base; 1831 + else 1832 + one_ahead = card->tsq.next + 1; 1972 1833 1973 - if (card->tsq.next == card->tsq.last) 1974 - one_ahead = card->tsq.base; 1975 - else 1976 - one_ahead = card->tsq.next + 1; 1834 + if (one_ahead == card->tsq.last) 1835 + two_ahead = card->tsq.base; 1836 + else 1837 + two_ahead = one_ahead + 1; 1977 1838 1978 - if (one_ahead == card->tsq.last) 1979 - two_ahead = card->tsq.base; 1980 - else 1981 - two_ahead = one_ahead + 1; 1982 - } 1839 + while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || 1840 + !ns_tsi_isempty(two_ahead)) 1841 + /* At most two empty, as stated in the 77201 errata */ 1842 + { 1843 + serviced_entries = 1; 1983 1844 1984 - if (serviced_entries) { 1985 - writel((((u32) previous) - ((u32) card->tsq.base)), 1986 - card->membase + TSQH); 1987 - } 1845 + /* Skip the one or two possible empty entries */ 1846 + while (ns_tsi_isempty(card->tsq.next)) { 1847 + if (card->tsq.next == card->tsq.last) 1848 + card->tsq.next = card->tsq.base; 1849 + else 1850 + card->tsq.next++; 1851 + } 1852 + 1853 + if (!ns_tsi_tmrof(card->tsq.next)) { 1854 + scdi = ns_tsi_getscdindex(card->tsq.next); 1855 + if (scdi == NS_TSI_SCDISVBR) 1856 + scq = card->scq0; 1857 + else { 1858 + if (card->scd2vc[scdi] == NULL) { 1859 + printk 1860 + ("nicstar%d: could not find VC from SCD index.\n", 1861 + card->index); 1862 + ns_tsi_init(card->tsq.next); 1863 + return; 1864 + } 1865 + scq = card->scd2vc[scdi]->scq; 1866 + } 1867 + drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); 1868 + scq->full = 0; 1869 + wake_up_interruptible(&(scq->scqfull_waitq)); 1870 + } 1871 + 1872 + ns_tsi_init(card->tsq.next); 1873 + previous = card->tsq.next; 1874 + if (card->tsq.next == card->tsq.last) 1875 + card->tsq.next = card->tsq.base; 1876 + else 
1877 + card->tsq.next++; 1878 + 1879 + if (card->tsq.next == card->tsq.last) 1880 + one_ahead = card->tsq.base; 1881 + else 1882 + one_ahead = card->tsq.next + 1; 1883 + 1884 + if (one_ahead == card->tsq.last) 1885 + two_ahead = card->tsq.base; 1886 + else 1887 + two_ahead = one_ahead + 1; 1888 + } 1889 + 1890 + if (serviced_entries) { 1891 + writel((((u32) previous) - ((u32) card->tsq.base)), 1892 + card->membase + TSQH); 1893 + } 1988 1894 } 1989 1895 1990 - 1991 - 1992 - static void drain_scq(ns_dev *card, scq_info *scq, int pos) 1896 + static void drain_scq(ns_dev * card, scq_info * scq, int pos) 1993 1897 { 1994 - struct atm_vcc *vcc; 1995 - struct sk_buff *skb; 1996 - int i; 1997 - unsigned long flags; 1998 - 1999 - XPRINTK("nicstar%d: drain_scq() called, scq at 0x%x, pos %d.\n", 2000 - card->index, (u32) scq, pos); 2001 - if (pos >= scq->num_entries) 2002 - { 2003 - printk("nicstar%d: Bad index on drain_scq().\n", card->index); 2004 - return; 2005 - } 1898 + struct atm_vcc *vcc; 1899 + struct sk_buff *skb; 1900 + int i; 1901 + unsigned long flags; 2006 1902 2007 - spin_lock_irqsave(&scq->lock, flags); 2008 - i = (int) (scq->tail - scq->base); 2009 - if (++i == scq->num_entries) 2010 - i = 0; 2011 - while (i != pos) 2012 - { 2013 - skb = scq->skb[i]; 2014 - XPRINTK("nicstar%d: freeing skb at 0x%x (index %d).\n", 2015 - card->index, (u32) skb, i); 2016 - if (skb != NULL) 2017 - { 2018 - vcc = ATM_SKB(skb)->vcc; 2019 - if (vcc && vcc->pop != NULL) { 2020 - vcc->pop(vcc, skb); 2021 - } else { 2022 - dev_kfree_skb_irq(skb); 2023 - } 2024 - scq->skb[i] = NULL; 2025 - } 2026 - if (++i == scq->num_entries) 2027 - i = 0; 2028 - } 2029 - scq->tail = scq->base + pos; 2030 - spin_unlock_irqrestore(&scq->lock, flags); 1903 + XPRINTK("nicstar%d: drain_scq() called, scq at 0x%x, pos %d.\n", 1904 + card->index, (u32) scq, pos); 1905 + if (pos >= scq->num_entries) { 1906 + printk("nicstar%d: Bad index on drain_scq().\n", card->index); 1907 + return; 1908 + } 1909 + 1910 + 
spin_lock_irqsave(&scq->lock, flags); 1911 + i = (int)(scq->tail - scq->base); 1912 + if (++i == scq->num_entries) 1913 + i = 0; 1914 + while (i != pos) { 1915 + skb = scq->skb[i]; 1916 + XPRINTK("nicstar%d: freeing skb at 0x%x (index %d).\n", 1917 + card->index, (u32) skb, i); 1918 + if (skb != NULL) { 1919 + vcc = ATM_SKB(skb)->vcc; 1920 + if (vcc && vcc->pop != NULL) { 1921 + vcc->pop(vcc, skb); 1922 + } else { 1923 + dev_kfree_skb_irq(skb); 1924 + } 1925 + scq->skb[i] = NULL; 1926 + } 1927 + if (++i == scq->num_entries) 1928 + i = 0; 1929 + } 1930 + scq->tail = scq->base + pos; 1931 + spin_unlock_irqrestore(&scq->lock, flags); 2031 1932 } 2032 1933 2033 - 2034 - 2035 - static void process_rsq(ns_dev *card) 1934 + static void process_rsq(ns_dev * card) 2036 1935 { 2037 - ns_rsqe *previous; 1936 + ns_rsqe *previous; 2038 1937 2039 - if (!ns_rsqe_valid(card->rsq.next)) 2040 - return; 2041 - do { 2042 - dequeue_rx(card, card->rsq.next); 2043 - ns_rsqe_init(card->rsq.next); 2044 - previous = card->rsq.next; 2045 - if (card->rsq.next == card->rsq.last) 2046 - card->rsq.next = card->rsq.base; 2047 - else 2048 - card->rsq.next++; 2049 - } while (ns_rsqe_valid(card->rsq.next)); 2050 - writel((((u32) previous) - ((u32) card->rsq.base)), 2051 - card->membase + RSQH); 1938 + if (!ns_rsqe_valid(card->rsq.next)) 1939 + return; 1940 + do { 1941 + dequeue_rx(card, card->rsq.next); 1942 + ns_rsqe_init(card->rsq.next); 1943 + previous = card->rsq.next; 1944 + if (card->rsq.next == card->rsq.last) 1945 + card->rsq.next = card->rsq.base; 1946 + else 1947 + card->rsq.next++; 1948 + } while (ns_rsqe_valid(card->rsq.next)); 1949 + writel((((u32) previous) - ((u32) card->rsq.base)), 1950 + card->membase + RSQH); 2052 1951 } 2053 1952 2054 - 2055 - 2056 - static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) 1953 + static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) 2057 1954 { 2058 - u32 vpi, vci; 2059 - vc_map *vc; 2060 - struct sk_buff *iovb; 2061 - struct iovec *iov; 2062 - struct 
atm_vcc *vcc; 2063 - struct sk_buff *skb; 2064 - unsigned short aal5_len; 2065 - int len; 2066 - u32 stat; 1955 + u32 vpi, vci; 1956 + vc_map *vc; 1957 + struct sk_buff *iovb; 1958 + struct iovec *iov; 1959 + struct atm_vcc *vcc; 1960 + struct sk_buff *skb; 1961 + unsigned short aal5_len; 1962 + int len; 1963 + u32 stat; 2067 1964 2068 - stat = readl(card->membase + STAT); 2069 - card->sbfqc = ns_stat_sfbqc_get(stat); 2070 - card->lbfqc = ns_stat_lfbqc_get(stat); 1965 + stat = readl(card->membase + STAT); 1966 + card->sbfqc = ns_stat_sfbqc_get(stat); 1967 + card->lbfqc = ns_stat_lfbqc_get(stat); 2071 1968 2072 - skb = (struct sk_buff *) le32_to_cpu(rsqe->buffer_handle); 2073 - vpi = ns_rsqe_vpi(rsqe); 2074 - vci = ns_rsqe_vci(rsqe); 2075 - if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) 2076 - { 2077 - printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n", 2078 - card->index, vpi, vci); 2079 - recycle_rx_buf(card, skb); 2080 - return; 2081 - } 2082 - 2083 - vc = &(card->vcmap[vpi << card->vcibits | vci]); 2084 - if (!vc->rx) 2085 - { 2086 - RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n", 2087 - card->index, vpi, vci); 2088 - recycle_rx_buf(card, skb); 2089 - return; 2090 - } 1969 + skb = (struct sk_buff *)le32_to_cpu(rsqe->buffer_handle); 1970 + vpi = ns_rsqe_vpi(rsqe); 1971 + vci = ns_rsqe_vci(rsqe); 1972 + if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) { 1973 + printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n", 1974 + card->index, vpi, vci); 1975 + recycle_rx_buf(card, skb); 1976 + return; 1977 + } 2091 1978 2092 - vcc = vc->rx_vcc; 1979 + vc = &(card->vcmap[vpi << card->vcibits | vci]); 1980 + if (!vc->rx) { 1981 + RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n", 1982 + card->index, vpi, vci); 1983 + recycle_rx_buf(card, skb); 1984 + return; 1985 + } 2093 1986 2094 - if (vcc->qos.aal == ATM_AAL0) 2095 - { 2096 - struct sk_buff *sb; 2097 - unsigned char *cell; 2098 - int i; 1987 + vcc = 
vc->rx_vcc; 2099 1988 2100 - cell = skb->data; 2101 - for (i = ns_rsqe_cellcount(rsqe); i; i--) 2102 - { 2103 - if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) 2104 - { 2105 - printk("nicstar%d: Can't allocate buffers for aal0.\n", 2106 - card->index); 2107 - atomic_add(i,&vcc->stats->rx_drop); 2108 - break; 2109 - } 2110 - if (!atm_charge(vcc, sb->truesize)) 2111 - { 2112 - RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n", 2113 - card->index); 2114 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */ 2115 - dev_kfree_skb_any(sb); 2116 - break; 2117 - } 2118 - /* Rebuild the header */ 2119 - *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 | 2120 - (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000); 2121 - if (i == 1 && ns_rsqe_eopdu(rsqe)) 2122 - *((u32 *) sb->data) |= 0x00000002; 2123 - skb_put(sb, NS_AAL0_HEADER); 2124 - memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); 2125 - skb_put(sb, ATM_CELL_PAYLOAD); 2126 - ATM_SKB(sb)->vcc = vcc; 2127 - __net_timestamp(sb); 2128 - vcc->push(vcc, sb); 2129 - atomic_inc(&vcc->stats->rx); 2130 - cell += ATM_CELL_PAYLOAD; 2131 - } 1989 + if (vcc->qos.aal == ATM_AAL0) { 1990 + struct sk_buff *sb; 1991 + unsigned char *cell; 1992 + int i; 2132 1993 2133 - recycle_rx_buf(card, skb); 2134 - return; 2135 - } 1994 + cell = skb->data; 1995 + for (i = ns_rsqe_cellcount(rsqe); i; i--) { 1996 + if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) { 1997 + printk 1998 + ("nicstar%d: Can't allocate buffers for aal0.\n", 1999 + card->index); 2000 + atomic_add(i, &vcc->stats->rx_drop); 2001 + break; 2002 + } 2003 + if (!atm_charge(vcc, sb->truesize)) { 2004 + RXPRINTK 2005 + ("nicstar%d: atm_charge() dropped aal0 packets.\n", 2006 + card->index); 2007 + atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ 2008 + dev_kfree_skb_any(sb); 2009 + break; 2010 + } 2011 + /* Rebuild the header */ 2012 + *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 | 2013 + (ns_rsqe_clp(rsqe) ? 
0x00000001 : 0x00000000); 2014 + if (i == 1 && ns_rsqe_eopdu(rsqe)) 2015 + *((u32 *) sb->data) |= 0x00000002; 2016 + skb_put(sb, NS_AAL0_HEADER); 2017 + memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); 2018 + skb_put(sb, ATM_CELL_PAYLOAD); 2019 + ATM_SKB(sb)->vcc = vcc; 2020 + __net_timestamp(sb); 2021 + vcc->push(vcc, sb); 2022 + atomic_inc(&vcc->stats->rx); 2023 + cell += ATM_CELL_PAYLOAD; 2024 + } 2136 2025 2137 - /* To reach this point, the AAL layer can only be AAL5 */ 2026 + recycle_rx_buf(card, skb); 2027 + return; 2028 + } 2138 2029 2139 - if ((iovb = vc->rx_iov) == NULL) 2140 - { 2141 - iovb = skb_dequeue(&(card->iovpool.queue)); 2142 - if (iovb == NULL) /* No buffers in the queue */ 2143 - { 2144 - iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC); 2145 - if (iovb == NULL) 2146 - { 2147 - printk("nicstar%d: Out of iovec buffers.\n", card->index); 2148 - atomic_inc(&vcc->stats->rx_drop); 2149 - recycle_rx_buf(card, skb); 2150 - return; 2151 - } 2152 - NS_SKB_CB(iovb)->buf_type = BUF_NONE; 2153 - } 2154 - else 2155 - if (--card->iovpool.count < card->iovnr.min) 2156 - { 2157 - struct sk_buff *new_iovb; 2158 - if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) 2159 - { 2160 - NS_SKB_CB(iovb)->buf_type = BUF_NONE; 2161 - skb_queue_tail(&card->iovpool.queue, new_iovb); 2162 - card->iovpool.count++; 2163 - } 2164 - } 2165 - vc->rx_iov = iovb; 2166 - NS_SKB(iovb)->iovcnt = 0; 2167 - iovb->len = 0; 2168 - iovb->data = iovb->head; 2169 - skb_reset_tail_pointer(iovb); 2170 - NS_SKB(iovb)->vcc = vcc; 2171 - /* IMPORTANT: a pointer to the sk_buff containing the small or large 2172 - buffer is stored as iovec base, NOT a pointer to the 2173 - small or large buffer itself. 
*/ 2174 - } 2175 - else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS) 2176 - { 2177 - printk("nicstar%d: received too big AAL5 SDU.\n", card->index); 2178 - atomic_inc(&vcc->stats->rx_err); 2179 - recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS); 2180 - NS_SKB(iovb)->iovcnt = 0; 2181 - iovb->len = 0; 2182 - iovb->data = iovb->head; 2183 - skb_reset_tail_pointer(iovb); 2184 - NS_SKB(iovb)->vcc = vcc; 2185 - } 2186 - iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++]; 2187 - iov->iov_base = (void *) skb; 2188 - iov->iov_len = ns_rsqe_cellcount(rsqe) * 48; 2189 - iovb->len += iov->iov_len; 2030 + /* To reach this point, the AAL layer can only be AAL5 */ 2190 2031 2191 - if (NS_SKB(iovb)->iovcnt == 1) 2192 - { 2193 - if (NS_SKB_CB(skb)->buf_type != BUF_SM) 2194 - { 2195 - printk("nicstar%d: Expected a small buffer, and this is not one.\n", 2196 - card->index); 2197 - which_list(card, skb); 2198 - atomic_inc(&vcc->stats->rx_err); 2199 - recycle_rx_buf(card, skb); 2200 - vc->rx_iov = NULL; 2201 - recycle_iov_buf(card, iovb); 2202 - return; 2203 - } 2204 - } 2205 - else /* NS_SKB(iovb)->iovcnt >= 2 */ 2206 - { 2207 - if (NS_SKB_CB(skb)->buf_type != BUF_LG) 2208 - { 2209 - printk("nicstar%d: Expected a large buffer, and this is not one.\n", 2210 - card->index); 2211 - which_list(card, skb); 2212 - atomic_inc(&vcc->stats->rx_err); 2213 - recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, 2214 - NS_SKB(iovb)->iovcnt); 2215 - vc->rx_iov = NULL; 2216 - recycle_iov_buf(card, iovb); 2217 - return; 2218 - } 2219 - } 2032 + if ((iovb = vc->rx_iov) == NULL) { 2033 + iovb = skb_dequeue(&(card->iovpool.queue)); 2034 + if (iovb == NULL) { /* No buffers in the queue */ 2035 + iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC); 2036 + if (iovb == NULL) { 2037 + printk("nicstar%d: Out of iovec buffers.\n", 2038 + card->index); 2039 + atomic_inc(&vcc->stats->rx_drop); 2040 + recycle_rx_buf(card, skb); 2041 + return; 2042 + } 2043 + NS_SKB_CB(iovb)->buf_type = 
BUF_NONE; 2044 + } else if (--card->iovpool.count < card->iovnr.min) { 2045 + struct sk_buff *new_iovb; 2046 + if ((new_iovb = 2047 + alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) { 2048 + NS_SKB_CB(iovb)->buf_type = BUF_NONE; 2049 + skb_queue_tail(&card->iovpool.queue, new_iovb); 2050 + card->iovpool.count++; 2051 + } 2052 + } 2053 + vc->rx_iov = iovb; 2054 + NS_SKB(iovb)->iovcnt = 0; 2055 + iovb->len = 0; 2056 + iovb->data = iovb->head; 2057 + skb_reset_tail_pointer(iovb); 2058 + NS_SKB(iovb)->vcc = vcc; 2059 + /* IMPORTANT: a pointer to the sk_buff containing the small or large 2060 + buffer is stored as iovec base, NOT a pointer to the 2061 + small or large buffer itself. */ 2062 + } else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS) { 2063 + printk("nicstar%d: received too big AAL5 SDU.\n", card->index); 2064 + atomic_inc(&vcc->stats->rx_err); 2065 + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, 2066 + NS_MAX_IOVECS); 2067 + NS_SKB(iovb)->iovcnt = 0; 2068 + iovb->len = 0; 2069 + iovb->data = iovb->head; 2070 + skb_reset_tail_pointer(iovb); 2071 + NS_SKB(iovb)->vcc = vcc; 2072 + } 2073 + iov = &((struct iovec *)iovb->data)[NS_SKB(iovb)->iovcnt++]; 2074 + iov->iov_base = (void *)skb; 2075 + iov->iov_len = ns_rsqe_cellcount(rsqe) * 48; 2076 + iovb->len += iov->iov_len; 2220 2077 2221 - if (ns_rsqe_eopdu(rsqe)) 2222 - { 2223 - /* This works correctly regardless of the endianness of the host */ 2224 - unsigned char *L1L2 = (unsigned char *)((u32)skb->data + 2225 - iov->iov_len - 6); 2226 - aal5_len = L1L2[0] << 8 | L1L2[1]; 2227 - len = (aal5_len == 0x0000) ? 
0x10000 : aal5_len; 2228 - if (ns_rsqe_crcerr(rsqe) || 2229 - len + 8 > iovb->len || len + (47 + 8) < iovb->len) 2230 - { 2231 - printk("nicstar%d: AAL5 CRC error", card->index); 2232 - if (len + 8 > iovb->len || len + (47 + 8) < iovb->len) 2233 - printk(" - PDU size mismatch.\n"); 2234 - else 2235 - printk(".\n"); 2236 - atomic_inc(&vcc->stats->rx_err); 2237 - recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, 2238 - NS_SKB(iovb)->iovcnt); 2239 - vc->rx_iov = NULL; 2240 - recycle_iov_buf(card, iovb); 2241 - return; 2242 - } 2078 + if (NS_SKB(iovb)->iovcnt == 1) { 2079 + if (NS_SKB_CB(skb)->buf_type != BUF_SM) { 2080 + printk 2081 + ("nicstar%d: Expected a small buffer, and this is not one.\n", 2082 + card->index); 2083 + which_list(card, skb); 2084 + atomic_inc(&vcc->stats->rx_err); 2085 + recycle_rx_buf(card, skb); 2086 + vc->rx_iov = NULL; 2087 + recycle_iov_buf(card, iovb); 2088 + return; 2089 + } 2090 + } else { /* NS_SKB(iovb)->iovcnt >= 2 */ 2243 2091 2244 - /* By this point we (hopefully) have a complete SDU without errors. 
*/ 2092 + if (NS_SKB_CB(skb)->buf_type != BUF_LG) { 2093 + printk 2094 + ("nicstar%d: Expected a large buffer, and this is not one.\n", 2095 + card->index); 2096 + which_list(card, skb); 2097 + atomic_inc(&vcc->stats->rx_err); 2098 + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, 2099 + NS_SKB(iovb)->iovcnt); 2100 + vc->rx_iov = NULL; 2101 + recycle_iov_buf(card, iovb); 2102 + return; 2103 + } 2104 + } 2245 2105 2246 - if (NS_SKB(iovb)->iovcnt == 1) /* Just a small buffer */ 2247 - { 2248 - /* skb points to a small buffer */ 2249 - if (!atm_charge(vcc, skb->truesize)) 2250 - { 2251 - push_rxbufs(card, skb); 2252 - atomic_inc(&vcc->stats->rx_drop); 2253 - } 2254 - else 2255 - { 2256 - skb_put(skb, len); 2257 - dequeue_sm_buf(card, skb); 2106 + if (ns_rsqe_eopdu(rsqe)) { 2107 + /* This works correctly regardless of the endianness of the host */ 2108 + unsigned char *L1L2 = (unsigned char *)((u32) skb->data + 2109 + iov->iov_len - 6); 2110 + aal5_len = L1L2[0] << 8 | L1L2[1]; 2111 + len = (aal5_len == 0x0000) ? 0x10000 : aal5_len; 2112 + if (ns_rsqe_crcerr(rsqe) || 2113 + len + 8 > iovb->len || len + (47 + 8) < iovb->len) { 2114 + printk("nicstar%d: AAL5 CRC error", card->index); 2115 + if (len + 8 > iovb->len || len + (47 + 8) < iovb->len) 2116 + printk(" - PDU size mismatch.\n"); 2117 + else 2118 + printk(".\n"); 2119 + atomic_inc(&vcc->stats->rx_err); 2120 + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, 2121 + NS_SKB(iovb)->iovcnt); 2122 + vc->rx_iov = NULL; 2123 + recycle_iov_buf(card, iovb); 2124 + return; 2125 + } 2126 + 2127 + /* By this point we (hopefully) have a complete SDU without errors. 
*/ 2128 + 2129 + if (NS_SKB(iovb)->iovcnt == 1) { /* Just a small buffer */ 2130 + /* skb points to a small buffer */ 2131 + if (!atm_charge(vcc, skb->truesize)) { 2132 + push_rxbufs(card, skb); 2133 + atomic_inc(&vcc->stats->rx_drop); 2134 + } else { 2135 + skb_put(skb, len); 2136 + dequeue_sm_buf(card, skb); 2258 2137 #ifdef NS_USE_DESTRUCTORS 2259 - skb->destructor = ns_sb_destructor; 2138 + skb->destructor = ns_sb_destructor; 2260 2139 #endif /* NS_USE_DESTRUCTORS */ 2261 - ATM_SKB(skb)->vcc = vcc; 2262 - __net_timestamp(skb); 2263 - vcc->push(vcc, skb); 2264 - atomic_inc(&vcc->stats->rx); 2265 - } 2266 - } 2267 - else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */ 2268 - { 2269 - struct sk_buff *sb; 2140 + ATM_SKB(skb)->vcc = vcc; 2141 + __net_timestamp(skb); 2142 + vcc->push(vcc, skb); 2143 + atomic_inc(&vcc->stats->rx); 2144 + } 2145 + } else if (NS_SKB(iovb)->iovcnt == 2) { /* One small plus one large buffer */ 2146 + struct sk_buff *sb; 2270 2147 2271 - sb = (struct sk_buff *) (iov - 1)->iov_base; 2272 - /* skb points to a large buffer */ 2148 + sb = (struct sk_buff *)(iov - 1)->iov_base; 2149 + /* skb points to a large buffer */ 2273 2150 2274 - if (len <= NS_SMBUFSIZE) 2275 - { 2276 - if (!atm_charge(vcc, sb->truesize)) 2277 - { 2278 - push_rxbufs(card, sb); 2279 - atomic_inc(&vcc->stats->rx_drop); 2280 - } 2281 - else 2282 - { 2283 - skb_put(sb, len); 2284 - dequeue_sm_buf(card, sb); 2151 + if (len <= NS_SMBUFSIZE) { 2152 + if (!atm_charge(vcc, sb->truesize)) { 2153 + push_rxbufs(card, sb); 2154 + atomic_inc(&vcc->stats->rx_drop); 2155 + } else { 2156 + skb_put(sb, len); 2157 + dequeue_sm_buf(card, sb); 2285 2158 #ifdef NS_USE_DESTRUCTORS 2286 - sb->destructor = ns_sb_destructor; 2159 + sb->destructor = ns_sb_destructor; 2287 2160 #endif /* NS_USE_DESTRUCTORS */ 2288 - ATM_SKB(sb)->vcc = vcc; 2289 - __net_timestamp(sb); 2290 - vcc->push(vcc, sb); 2291 - atomic_inc(&vcc->stats->rx); 2292 - } 2161 + ATM_SKB(sb)->vcc = vcc; 2162 + 
__net_timestamp(sb); 2163 + vcc->push(vcc, sb); 2164 + atomic_inc(&vcc->stats->rx); 2165 + } 2293 2166 2294 - push_rxbufs(card, skb); 2167 + push_rxbufs(card, skb); 2295 2168 2296 - } 2297 - else /* len > NS_SMBUFSIZE, the usual case */ 2298 - { 2299 - if (!atm_charge(vcc, skb->truesize)) 2300 - { 2301 - push_rxbufs(card, skb); 2302 - atomic_inc(&vcc->stats->rx_drop); 2303 - } 2304 - else 2305 - { 2306 - dequeue_lg_buf(card, skb); 2169 + } else { /* len > NS_SMBUFSIZE, the usual case */ 2170 + 2171 + if (!atm_charge(vcc, skb->truesize)) { 2172 + push_rxbufs(card, skb); 2173 + atomic_inc(&vcc->stats->rx_drop); 2174 + } else { 2175 + dequeue_lg_buf(card, skb); 2307 2176 #ifdef NS_USE_DESTRUCTORS 2308 - skb->destructor = ns_lb_destructor; 2177 + skb->destructor = ns_lb_destructor; 2309 2178 #endif /* NS_USE_DESTRUCTORS */ 2310 - skb_push(skb, NS_SMBUFSIZE); 2311 - skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE); 2312 - skb_put(skb, len - NS_SMBUFSIZE); 2313 - ATM_SKB(skb)->vcc = vcc; 2314 - __net_timestamp(skb); 2315 - vcc->push(vcc, skb); 2316 - atomic_inc(&vcc->stats->rx); 2317 - } 2179 + skb_push(skb, NS_SMBUFSIZE); 2180 + skb_copy_from_linear_data(sb, skb->data, 2181 + NS_SMBUFSIZE); 2182 + skb_put(skb, len - NS_SMBUFSIZE); 2183 + ATM_SKB(skb)->vcc = vcc; 2184 + __net_timestamp(skb); 2185 + vcc->push(vcc, skb); 2186 + atomic_inc(&vcc->stats->rx); 2187 + } 2318 2188 2319 - push_rxbufs(card, sb); 2189 + push_rxbufs(card, sb); 2320 2190 2321 - } 2322 - 2323 - } 2324 - else /* Must push a huge buffer */ 2325 - { 2326 - struct sk_buff *hb, *sb, *lb; 2327 - int remaining, tocopy; 2328 - int j; 2191 + } 2329 2192 2330 - hb = skb_dequeue(&(card->hbpool.queue)); 2331 - if (hb == NULL) /* No buffers in the queue */ 2332 - { 2193 + } else { /* Must push a huge buffer */ 2333 2194 2334 - hb = dev_alloc_skb(NS_HBUFSIZE); 2335 - if (hb == NULL) 2336 - { 2337 - printk("nicstar%d: Out of huge buffers.\n", card->index); 2338 - atomic_inc(&vcc->stats->rx_drop); 2339 - 
recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, 2340 - NS_SKB(iovb)->iovcnt); 2341 - vc->rx_iov = NULL; 2342 - recycle_iov_buf(card, iovb); 2343 - return; 2344 - } 2345 - else if (card->hbpool.count < card->hbnr.min) 2346 - { 2347 - struct sk_buff *new_hb; 2348 - if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) 2349 - { 2350 - skb_queue_tail(&card->hbpool.queue, new_hb); 2351 - card->hbpool.count++; 2352 - } 2353 - } 2354 - NS_SKB_CB(hb)->buf_type = BUF_NONE; 2355 - } 2356 - else 2357 - if (--card->hbpool.count < card->hbnr.min) 2358 - { 2359 - struct sk_buff *new_hb; 2360 - if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) 2361 - { 2362 - NS_SKB_CB(new_hb)->buf_type = BUF_NONE; 2363 - skb_queue_tail(&card->hbpool.queue, new_hb); 2364 - card->hbpool.count++; 2365 - } 2366 - if (card->hbpool.count < card->hbnr.min) 2367 - { 2368 - if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) 2369 - { 2370 - NS_SKB_CB(new_hb)->buf_type = BUF_NONE; 2371 - skb_queue_tail(&card->hbpool.queue, new_hb); 2372 - card->hbpool.count++; 2373 - } 2374 - } 2375 - } 2195 + struct sk_buff *hb, *sb, *lb; 2196 + int remaining, tocopy; 2197 + int j; 2376 2198 2377 - iov = (struct iovec *) iovb->data; 2199 + hb = skb_dequeue(&(card->hbpool.queue)); 2200 + if (hb == NULL) { /* No buffers in the queue */ 2378 2201 2379 - if (!atm_charge(vcc, hb->truesize)) 2380 - { 2381 - recycle_iovec_rx_bufs(card, iov, NS_SKB(iovb)->iovcnt); 2382 - if (card->hbpool.count < card->hbnr.max) 2383 - { 2384 - skb_queue_tail(&card->hbpool.queue, hb); 2385 - card->hbpool.count++; 2386 - } 2387 - else 2388 - dev_kfree_skb_any(hb); 2389 - atomic_inc(&vcc->stats->rx_drop); 2390 - } 2391 - else 2392 - { 2393 - /* Copy the small buffer to the huge buffer */ 2394 - sb = (struct sk_buff *) iov->iov_base; 2395 - skb_copy_from_linear_data(sb, hb->data, iov->iov_len); 2396 - skb_put(hb, iov->iov_len); 2397 - remaining = len - iov->iov_len; 2398 - iov++; 2399 - /* Free the small buffer */ 2400 - push_rxbufs(card, sb); 
2202 + hb = dev_alloc_skb(NS_HBUFSIZE); 2203 + if (hb == NULL) { 2204 + printk 2205 + ("nicstar%d: Out of huge buffers.\n", 2206 + card->index); 2207 + atomic_inc(&vcc->stats->rx_drop); 2208 + recycle_iovec_rx_bufs(card, 2209 + (struct iovec *) 2210 + iovb->data, 2211 + NS_SKB(iovb)-> 2212 + iovcnt); 2213 + vc->rx_iov = NULL; 2214 + recycle_iov_buf(card, iovb); 2215 + return; 2216 + } else if (card->hbpool.count < card->hbnr.min) { 2217 + struct sk_buff *new_hb; 2218 + if ((new_hb = 2219 + dev_alloc_skb(NS_HBUFSIZE)) != 2220 + NULL) { 2221 + skb_queue_tail(&card->hbpool. 2222 + queue, new_hb); 2223 + card->hbpool.count++; 2224 + } 2225 + } 2226 + NS_SKB_CB(hb)->buf_type = BUF_NONE; 2227 + } else if (--card->hbpool.count < card->hbnr.min) { 2228 + struct sk_buff *new_hb; 2229 + if ((new_hb = 2230 + dev_alloc_skb(NS_HBUFSIZE)) != NULL) { 2231 + NS_SKB_CB(new_hb)->buf_type = BUF_NONE; 2232 + skb_queue_tail(&card->hbpool.queue, 2233 + new_hb); 2234 + card->hbpool.count++; 2235 + } 2236 + if (card->hbpool.count < card->hbnr.min) { 2237 + if ((new_hb = 2238 + dev_alloc_skb(NS_HBUFSIZE)) != 2239 + NULL) { 2240 + NS_SKB_CB(new_hb)->buf_type = 2241 + BUF_NONE; 2242 + skb_queue_tail(&card->hbpool. 
2243 + queue, new_hb); 2244 + card->hbpool.count++; 2245 + } 2246 + } 2247 + } 2401 2248 2402 - /* Copy all large buffers to the huge buffer and free them */ 2403 - for (j = 1; j < NS_SKB(iovb)->iovcnt; j++) 2404 - { 2405 - lb = (struct sk_buff *) iov->iov_base; 2406 - tocopy = min_t(int, remaining, iov->iov_len); 2407 - skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy); 2408 - skb_put(hb, tocopy); 2409 - iov++; 2410 - remaining -= tocopy; 2411 - push_rxbufs(card, lb); 2412 - } 2249 + iov = (struct iovec *)iovb->data; 2250 + 2251 + if (!atm_charge(vcc, hb->truesize)) { 2252 + recycle_iovec_rx_bufs(card, iov, 2253 + NS_SKB(iovb)->iovcnt); 2254 + if (card->hbpool.count < card->hbnr.max) { 2255 + skb_queue_tail(&card->hbpool.queue, hb); 2256 + card->hbpool.count++; 2257 + } else 2258 + dev_kfree_skb_any(hb); 2259 + atomic_inc(&vcc->stats->rx_drop); 2260 + } else { 2261 + /* Copy the small buffer to the huge buffer */ 2262 + sb = (struct sk_buff *)iov->iov_base; 2263 + skb_copy_from_linear_data(sb, hb->data, 2264 + iov->iov_len); 2265 + skb_put(hb, iov->iov_len); 2266 + remaining = len - iov->iov_len; 2267 + iov++; 2268 + /* Free the small buffer */ 2269 + push_rxbufs(card, sb); 2270 + 2271 + /* Copy all large buffers to the huge buffer and free them */ 2272 + for (j = 1; j < NS_SKB(iovb)->iovcnt; j++) { 2273 + lb = (struct sk_buff *)iov->iov_base; 2274 + tocopy = 2275 + min_t(int, remaining, iov->iov_len); 2276 + skb_copy_from_linear_data(lb, 2277 + skb_tail_pointer 2278 + (hb), tocopy); 2279 + skb_put(hb, tocopy); 2280 + iov++; 2281 + remaining -= tocopy; 2282 + push_rxbufs(card, lb); 2283 + } 2413 2284 #ifdef EXTRA_DEBUG 2414 - if (remaining != 0 || hb->len != len) 2415 - printk("nicstar%d: Huge buffer len mismatch.\n", card->index); 2285 + if (remaining != 0 || hb->len != len) 2286 + printk 2287 + ("nicstar%d: Huge buffer len mismatch.\n", 2288 + card->index); 2416 2289 #endif /* EXTRA_DEBUG */ 2417 - ATM_SKB(hb)->vcc = vcc; 2290 + ATM_SKB(hb)->vcc = vcc; 
2418 2291 #ifdef NS_USE_DESTRUCTORS 2419 - hb->destructor = ns_hb_destructor; 2292 + hb->destructor = ns_hb_destructor; 2420 2293 #endif /* NS_USE_DESTRUCTORS */ 2421 - __net_timestamp(hb); 2422 - vcc->push(vcc, hb); 2423 - atomic_inc(&vcc->stats->rx); 2424 - } 2425 - } 2294 + __net_timestamp(hb); 2295 + vcc->push(vcc, hb); 2296 + atomic_inc(&vcc->stats->rx); 2297 + } 2298 + } 2426 2299 2427 - vc->rx_iov = NULL; 2428 - recycle_iov_buf(card, iovb); 2429 - } 2300 + vc->rx_iov = NULL; 2301 + recycle_iov_buf(card, iovb); 2302 + } 2430 2303 2431 2304 } 2432 - 2433 - 2434 2305 2435 2306 #ifdef NS_USE_DESTRUCTORS 2436 2307 2437 2308 static void ns_sb_destructor(struct sk_buff *sb) 2438 2309 { 2439 - ns_dev *card; 2440 - u32 stat; 2310 + ns_dev *card; 2311 + u32 stat; 2441 2312 2442 - card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data; 2443 - stat = readl(card->membase + STAT); 2444 - card->sbfqc = ns_stat_sfbqc_get(stat); 2445 - card->lbfqc = ns_stat_lfbqc_get(stat); 2313 + card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data; 2314 + stat = readl(card->membase + STAT); 2315 + card->sbfqc = ns_stat_sfbqc_get(stat); 2316 + card->lbfqc = ns_stat_lfbqc_get(stat); 2446 2317 2447 - do 2448 - { 2449 - sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); 2450 - if (sb == NULL) 2451 - break; 2452 - NS_SKB_CB(sb)->buf_type = BUF_SM; 2453 - skb_queue_tail(&card->sbpool.queue, sb); 2454 - skb_reserve(sb, NS_AAL0_HEADER); 2455 - push_rxbufs(card, sb); 2456 - } while (card->sbfqc < card->sbnr.min); 2318 + do { 2319 + sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); 2320 + if (sb == NULL) 2321 + break; 2322 + NS_SKB_CB(sb)->buf_type = BUF_SM; 2323 + skb_queue_tail(&card->sbpool.queue, sb); 2324 + skb_reserve(sb, NS_AAL0_HEADER); 2325 + push_rxbufs(card, sb); 2326 + } while (card->sbfqc < card->sbnr.min); 2457 2327 } 2458 - 2459 - 2460 2328 2461 2329 static void ns_lb_destructor(struct sk_buff *lb) 2462 2330 { 2463 - ns_dev *card; 2464 - u32 stat; 2331 + ns_dev *card; 2332 + u32 stat; 2465 2333 
2466 - card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data; 2467 - stat = readl(card->membase + STAT); 2468 - card->sbfqc = ns_stat_sfbqc_get(stat); 2469 - card->lbfqc = ns_stat_lfbqc_get(stat); 2334 + card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data; 2335 + stat = readl(card->membase + STAT); 2336 + card->sbfqc = ns_stat_sfbqc_get(stat); 2337 + card->lbfqc = ns_stat_lfbqc_get(stat); 2470 2338 2471 - do 2472 - { 2473 - lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); 2474 - if (lb == NULL) 2475 - break; 2476 - NS_SKB_CB(lb)->buf_type = BUF_LG; 2477 - skb_queue_tail(&card->lbpool.queue, lb); 2478 - skb_reserve(lb, NS_SMBUFSIZE); 2479 - push_rxbufs(card, lb); 2480 - } while (card->lbfqc < card->lbnr.min); 2339 + do { 2340 + lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); 2341 + if (lb == NULL) 2342 + break; 2343 + NS_SKB_CB(lb)->buf_type = BUF_LG; 2344 + skb_queue_tail(&card->lbpool.queue, lb); 2345 + skb_reserve(lb, NS_SMBUFSIZE); 2346 + push_rxbufs(card, lb); 2347 + } while (card->lbfqc < card->lbnr.min); 2481 2348 } 2482 - 2483 - 2484 2349 2485 2350 static void ns_hb_destructor(struct sk_buff *hb) 2486 2351 { 2487 - ns_dev *card; 2352 + ns_dev *card; 2488 2353 2489 - card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data; 2354 + card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data; 2490 2355 2491 - while (card->hbpool.count < card->hbnr.init) 2492 - { 2493 - hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); 2494 - if (hb == NULL) 2495 - break; 2496 - NS_SKB_CB(hb)->buf_type = BUF_NONE; 2497 - skb_queue_tail(&card->hbpool.queue, hb); 2498 - card->hbpool.count++; 2499 - } 2356 + while (card->hbpool.count < card->hbnr.init) { 2357 + hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); 2358 + if (hb == NULL) 2359 + break; 2360 + NS_SKB_CB(hb)->buf_type = BUF_NONE; 2361 + skb_queue_tail(&card->hbpool.queue, hb); 2362 + card->hbpool.count++; 2363 + } 2500 2364 } 2501 2365 2502 2366 #endif /* NS_USE_DESTRUCTORS */ 2503 2367 2504 - 2505 - static void recycle_rx_buf(ns_dev *card, struct 
sk_buff *skb) 2368 + static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb) 2506 2369 { 2507 2370 struct ns_skb_cb *cb = NS_SKB_CB(skb); 2508 2371 2509 2372 if (unlikely(cb->buf_type == BUF_NONE)) { 2510 - printk("nicstar%d: What kind of rx buffer is this?\n", card->index); 2373 + printk("nicstar%d: What kind of rx buffer is this?\n", 2374 + card->index); 2511 2375 dev_kfree_skb_any(skb); 2512 2376 } else 2513 2377 push_rxbufs(card, skb); 2514 2378 } 2515 2379 2516 - 2517 - static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count) 2380 + static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count) 2518 2381 { 2519 2382 while (count-- > 0) 2520 - recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base); 2383 + recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base); 2521 2384 } 2522 2385 2523 - 2524 - static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb) 2386 + static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb) 2525 2387 { 2526 - if (card->iovpool.count < card->iovnr.max) 2527 - { 2528 - skb_queue_tail(&card->iovpool.queue, iovb); 2529 - card->iovpool.count++; 2530 - } 2531 - else 2532 - dev_kfree_skb_any(iovb); 2388 + if (card->iovpool.count < card->iovnr.max) { 2389 + skb_queue_tail(&card->iovpool.queue, iovb); 2390 + card->iovpool.count++; 2391 + } else 2392 + dev_kfree_skb_any(iovb); 2533 2393 } 2534 2394 2535 - 2536 - 2537 - static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb) 2395 + static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb) 2538 2396 { 2539 - skb_unlink(sb, &card->sbpool.queue); 2397 + skb_unlink(sb, &card->sbpool.queue); 2540 2398 #ifdef NS_USE_DESTRUCTORS 2541 - if (card->sbfqc < card->sbnr.min) 2399 + if (card->sbfqc < card->sbnr.min) 2542 2400 #else 2543 - if (card->sbfqc < card->sbnr.init) 2544 - { 2545 - struct sk_buff *new_sb; 2546 - if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) 2547 - { 2548 - NS_SKB_CB(new_sb)->buf_type = BUF_SM; 2549 - 
skb_queue_tail(&card->sbpool.queue, new_sb); 2550 - skb_reserve(new_sb, NS_AAL0_HEADER); 2551 - push_rxbufs(card, new_sb); 2552 - } 2553 - } 2554 - if (card->sbfqc < card->sbnr.init) 2401 + if (card->sbfqc < card->sbnr.init) { 2402 + struct sk_buff *new_sb; 2403 + if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { 2404 + NS_SKB_CB(new_sb)->buf_type = BUF_SM; 2405 + skb_queue_tail(&card->sbpool.queue, new_sb); 2406 + skb_reserve(new_sb, NS_AAL0_HEADER); 2407 + push_rxbufs(card, new_sb); 2408 + } 2409 + } 2410 + if (card->sbfqc < card->sbnr.init) 2555 2411 #endif /* NS_USE_DESTRUCTORS */ 2556 - { 2557 - struct sk_buff *new_sb; 2558 - if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) 2559 - { 2560 - NS_SKB_CB(new_sb)->buf_type = BUF_SM; 2561 - skb_queue_tail(&card->sbpool.queue, new_sb); 2562 - skb_reserve(new_sb, NS_AAL0_HEADER); 2563 - push_rxbufs(card, new_sb); 2564 - } 2565 - } 2412 + { 2413 + struct sk_buff *new_sb; 2414 + if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { 2415 + NS_SKB_CB(new_sb)->buf_type = BUF_SM; 2416 + skb_queue_tail(&card->sbpool.queue, new_sb); 2417 + skb_reserve(new_sb, NS_AAL0_HEADER); 2418 + push_rxbufs(card, new_sb); 2419 + } 2420 + } 2566 2421 } 2567 2422 2568 - 2569 - 2570 - static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb) 2423 + static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb) 2571 2424 { 2572 - skb_unlink(lb, &card->lbpool.queue); 2425 + skb_unlink(lb, &card->lbpool.queue); 2573 2426 #ifdef NS_USE_DESTRUCTORS 2574 - if (card->lbfqc < card->lbnr.min) 2427 + if (card->lbfqc < card->lbnr.min) 2575 2428 #else 2576 - if (card->lbfqc < card->lbnr.init) 2577 - { 2578 - struct sk_buff *new_lb; 2579 - if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) 2580 - { 2581 - NS_SKB_CB(new_lb)->buf_type = BUF_LG; 2582 - skb_queue_tail(&card->lbpool.queue, new_lb); 2583 - skb_reserve(new_lb, NS_SMBUFSIZE); 2584 - push_rxbufs(card, new_lb); 2585 - } 2586 - } 2587 - if (card->lbfqc < card->lbnr.init) 2429 + if 
(card->lbfqc < card->lbnr.init) { 2430 + struct sk_buff *new_lb; 2431 + if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { 2432 + NS_SKB_CB(new_lb)->buf_type = BUF_LG; 2433 + skb_queue_tail(&card->lbpool.queue, new_lb); 2434 + skb_reserve(new_lb, NS_SMBUFSIZE); 2435 + push_rxbufs(card, new_lb); 2436 + } 2437 + } 2438 + if (card->lbfqc < card->lbnr.init) 2588 2439 #endif /* NS_USE_DESTRUCTORS */ 2589 - { 2590 - struct sk_buff *new_lb; 2591 - if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) 2592 - { 2593 - NS_SKB_CB(new_lb)->buf_type = BUF_LG; 2594 - skb_queue_tail(&card->lbpool.queue, new_lb); 2595 - skb_reserve(new_lb, NS_SMBUFSIZE); 2596 - push_rxbufs(card, new_lb); 2597 - } 2598 - } 2440 + { 2441 + struct sk_buff *new_lb; 2442 + if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { 2443 + NS_SKB_CB(new_lb)->buf_type = BUF_LG; 2444 + skb_queue_tail(&card->lbpool.queue, new_lb); 2445 + skb_reserve(new_lb, NS_SMBUFSIZE); 2446 + push_rxbufs(card, new_lb); 2447 + } 2448 + } 2599 2449 } 2600 2450 2601 - 2602 - 2603 - static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page) 2451 + static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page) 2604 2452 { 2605 - u32 stat; 2606 - ns_dev *card; 2607 - int left; 2453 + u32 stat; 2454 + ns_dev *card; 2455 + int left; 2608 2456 2609 - left = (int) *pos; 2610 - card = (ns_dev *) dev->dev_data; 2611 - stat = readl(card->membase + STAT); 2612 - if (!left--) 2613 - return sprintf(page, "Pool count min init max \n"); 2614 - if (!left--) 2615 - return sprintf(page, "Small %5d %5d %5d %5d \n", 2616 - ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init, 2617 - card->sbnr.max); 2618 - if (!left--) 2619 - return sprintf(page, "Large %5d %5d %5d %5d \n", 2620 - ns_stat_lfbqc_get(stat), card->lbnr.min, card->lbnr.init, 2621 - card->lbnr.max); 2622 - if (!left--) 2623 - return sprintf(page, "Huge %5d %5d %5d %5d \n", card->hbpool.count, 2624 - card->hbnr.min, card->hbnr.init, card->hbnr.max); 2625 - if 
(!left--) 2626 - return sprintf(page, "Iovec %5d %5d %5d %5d \n", card->iovpool.count, 2627 - card->iovnr.min, card->iovnr.init, card->iovnr.max); 2628 - if (!left--) 2629 - { 2630 - int retval; 2631 - retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt); 2632 - card->intcnt = 0; 2633 - return retval; 2634 - } 2457 + left = (int)*pos; 2458 + card = (ns_dev *) dev->dev_data; 2459 + stat = readl(card->membase + STAT); 2460 + if (!left--) 2461 + return sprintf(page, "Pool count min init max \n"); 2462 + if (!left--) 2463 + return sprintf(page, "Small %5d %5d %5d %5d \n", 2464 + ns_stat_sfbqc_get(stat), card->sbnr.min, 2465 + card->sbnr.init, card->sbnr.max); 2466 + if (!left--) 2467 + return sprintf(page, "Large %5d %5d %5d %5d \n", 2468 + ns_stat_lfbqc_get(stat), card->lbnr.min, 2469 + card->lbnr.init, card->lbnr.max); 2470 + if (!left--) 2471 + return sprintf(page, "Huge %5d %5d %5d %5d \n", 2472 + card->hbpool.count, card->hbnr.min, 2473 + card->hbnr.init, card->hbnr.max); 2474 + if (!left--) 2475 + return sprintf(page, "Iovec %5d %5d %5d %5d \n", 2476 + card->iovpool.count, card->iovnr.min, 2477 + card->iovnr.init, card->iovnr.max); 2478 + if (!left--) { 2479 + int retval; 2480 + retval = 2481 + sprintf(page, "Interrupt counter: %u \n", card->intcnt); 2482 + card->intcnt = 0; 2483 + return retval; 2484 + } 2635 2485 #if 0 2636 - /* Dump 25.6 Mbps PHY registers */ 2637 - /* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it 2638 - here just in case it's needed for debugging. */ 2639 - if (card->max_pcr == ATM_25_PCR && !left--) 2640 - { 2641 - u32 phy_regs[4]; 2642 - u32 i; 2486 + /* Dump 25.6 Mbps PHY registers */ 2487 + /* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it 2488 + here just in case it's needed for debugging. 
*/ 2489 + if (card->max_pcr == ATM_25_PCR && !left--) { 2490 + u32 phy_regs[4]; 2491 + u32 i; 2643 2492 2644 - for (i = 0; i < 4; i++) 2645 - { 2646 - while (CMD_BUSY(card)); 2647 - writel(NS_CMD_READ_UTILITY | 0x00000200 | i, card->membase + CMD); 2648 - while (CMD_BUSY(card)); 2649 - phy_regs[i] = readl(card->membase + DR0) & 0x000000FF; 2650 - } 2493 + for (i = 0; i < 4; i++) { 2494 + while (CMD_BUSY(card)) ; 2495 + writel(NS_CMD_READ_UTILITY | 0x00000200 | i, 2496 + card->membase + CMD); 2497 + while (CMD_BUSY(card)) ; 2498 + phy_regs[i] = readl(card->membase + DR0) & 0x000000FF; 2499 + } 2651 2500 2652 - return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n", 2653 - phy_regs[0], phy_regs[1], phy_regs[2], phy_regs[3]); 2654 - } 2501 + return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n", 2502 + phy_regs[0], phy_regs[1], phy_regs[2], 2503 + phy_regs[3]); 2504 + } 2655 2505 #endif /* 0 - Dump 25.6 Mbps PHY registers */ 2656 2506 #if 0 2657 - /* Dump TST */ 2658 - if (left-- < NS_TST_NUM_ENTRIES) 2659 - { 2660 - if (card->tste2vc[left + 1] == NULL) 2661 - return sprintf(page, "%5d - VBR/UBR \n", left + 1); 2662 - else 2663 - return sprintf(page, "%5d - %d %d \n", left + 1, 2664 - card->tste2vc[left + 1]->tx_vcc->vpi, 2665 - card->tste2vc[left + 1]->tx_vcc->vci); 2666 - } 2507 + /* Dump TST */ 2508 + if (left-- < NS_TST_NUM_ENTRIES) { 2509 + if (card->tste2vc[left + 1] == NULL) 2510 + return sprintf(page, "%5d - VBR/UBR \n", left + 1); 2511 + else 2512 + return sprintf(page, "%5d - %d %d \n", left + 1, 2513 + card->tste2vc[left + 1]->tx_vcc->vpi, 2514 + card->tste2vc[left + 1]->tx_vcc->vci); 2515 + } 2667 2516 #endif /* 0 */ 2668 - return 0; 2517 + return 0; 2669 2518 } 2670 2519 2671 - 2672 - 2673 - static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) 2520 + static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg) 2674 2521 { 2675 - ns_dev *card; 2676 - pool_levels pl; 2677 - long btype; 2678 - 
unsigned long flags; 2522 + ns_dev *card; 2523 + pool_levels pl; 2524 + long btype; 2525 + unsigned long flags; 2679 2526 2680 - card = dev->dev_data; 2681 - switch (cmd) 2682 - { 2683 - case NS_GETPSTAT: 2684 - if (get_user(pl.buftype, &((pool_levels __user *) arg)->buftype)) 2685 - return -EFAULT; 2686 - switch (pl.buftype) 2687 - { 2688 - case NS_BUFTYPE_SMALL: 2689 - pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT)); 2690 - pl.level.min = card->sbnr.min; 2691 - pl.level.init = card->sbnr.init; 2692 - pl.level.max = card->sbnr.max; 2693 - break; 2527 + card = dev->dev_data; 2528 + switch (cmd) { 2529 + case NS_GETPSTAT: 2530 + if (get_user 2531 + (pl.buftype, &((pool_levels __user *) arg)->buftype)) 2532 + return -EFAULT; 2533 + switch (pl.buftype) { 2534 + case NS_BUFTYPE_SMALL: 2535 + pl.count = 2536 + ns_stat_sfbqc_get(readl(card->membase + STAT)); 2537 + pl.level.min = card->sbnr.min; 2538 + pl.level.init = card->sbnr.init; 2539 + pl.level.max = card->sbnr.max; 2540 + break; 2694 2541 2695 - case NS_BUFTYPE_LARGE: 2696 - pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT)); 2697 - pl.level.min = card->lbnr.min; 2698 - pl.level.init = card->lbnr.init; 2699 - pl.level.max = card->lbnr.max; 2700 - break; 2542 + case NS_BUFTYPE_LARGE: 2543 + pl.count = 2544 + ns_stat_lfbqc_get(readl(card->membase + STAT)); 2545 + pl.level.min = card->lbnr.min; 2546 + pl.level.init = card->lbnr.init; 2547 + pl.level.max = card->lbnr.max; 2548 + break; 2701 2549 2702 - case NS_BUFTYPE_HUGE: 2703 - pl.count = card->hbpool.count; 2704 - pl.level.min = card->hbnr.min; 2705 - pl.level.init = card->hbnr.init; 2706 - pl.level.max = card->hbnr.max; 2707 - break; 2550 + case NS_BUFTYPE_HUGE: 2551 + pl.count = card->hbpool.count; 2552 + pl.level.min = card->hbnr.min; 2553 + pl.level.init = card->hbnr.init; 2554 + pl.level.max = card->hbnr.max; 2555 + break; 2708 2556 2709 - case NS_BUFTYPE_IOVEC: 2710 - pl.count = card->iovpool.count; 2711 - pl.level.min = card->iovnr.min; 
2712 - pl.level.init = card->iovnr.init; 2713 - pl.level.max = card->iovnr.max; 2714 - break; 2557 + case NS_BUFTYPE_IOVEC: 2558 + pl.count = card->iovpool.count; 2559 + pl.level.min = card->iovnr.min; 2560 + pl.level.init = card->iovnr.init; 2561 + pl.level.max = card->iovnr.max; 2562 + break; 2715 2563 2716 - default: 2717 - return -ENOIOCTLCMD; 2564 + default: 2565 + return -ENOIOCTLCMD; 2718 2566 2719 - } 2720 - if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl))) 2721 - return (sizeof(pl)); 2722 - else 2723 - return -EFAULT; 2567 + } 2568 + if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl))) 2569 + return (sizeof(pl)); 2570 + else 2571 + return -EFAULT; 2724 2572 2725 - case NS_SETBUFLEV: 2726 - if (!capable(CAP_NET_ADMIN)) 2727 - return -EPERM; 2728 - if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl))) 2729 - return -EFAULT; 2730 - if (pl.level.min >= pl.level.init || pl.level.init >= pl.level.max) 2731 - return -EINVAL; 2732 - if (pl.level.min == 0) 2733 - return -EINVAL; 2734 - switch (pl.buftype) 2735 - { 2736 - case NS_BUFTYPE_SMALL: 2737 - if (pl.level.max > TOP_SB) 2738 - return -EINVAL; 2739 - card->sbnr.min = pl.level.min; 2740 - card->sbnr.init = pl.level.init; 2741 - card->sbnr.max = pl.level.max; 2742 - break; 2573 + case NS_SETBUFLEV: 2574 + if (!capable(CAP_NET_ADMIN)) 2575 + return -EPERM; 2576 + if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl))) 2577 + return -EFAULT; 2578 + if (pl.level.min >= pl.level.init 2579 + || pl.level.init >= pl.level.max) 2580 + return -EINVAL; 2581 + if (pl.level.min == 0) 2582 + return -EINVAL; 2583 + switch (pl.buftype) { 2584 + case NS_BUFTYPE_SMALL: 2585 + if (pl.level.max > TOP_SB) 2586 + return -EINVAL; 2587 + card->sbnr.min = pl.level.min; 2588 + card->sbnr.init = pl.level.init; 2589 + card->sbnr.max = pl.level.max; 2590 + break; 2743 2591 2744 - case NS_BUFTYPE_LARGE: 2745 - if (pl.level.max > TOP_LB) 2746 - return -EINVAL; 2747 - card->lbnr.min = 
pl.level.min; 2748 - card->lbnr.init = pl.level.init; 2749 - card->lbnr.max = pl.level.max; 2750 - break; 2592 + case NS_BUFTYPE_LARGE: 2593 + if (pl.level.max > TOP_LB) 2594 + return -EINVAL; 2595 + card->lbnr.min = pl.level.min; 2596 + card->lbnr.init = pl.level.init; 2597 + card->lbnr.max = pl.level.max; 2598 + break; 2751 2599 2752 - case NS_BUFTYPE_HUGE: 2753 - if (pl.level.max > TOP_HB) 2754 - return -EINVAL; 2755 - card->hbnr.min = pl.level.min; 2756 - card->hbnr.init = pl.level.init; 2757 - card->hbnr.max = pl.level.max; 2758 - break; 2600 + case NS_BUFTYPE_HUGE: 2601 + if (pl.level.max > TOP_HB) 2602 + return -EINVAL; 2603 + card->hbnr.min = pl.level.min; 2604 + card->hbnr.init = pl.level.init; 2605 + card->hbnr.max = pl.level.max; 2606 + break; 2759 2607 2760 - case NS_BUFTYPE_IOVEC: 2761 - if (pl.level.max > TOP_IOVB) 2762 - return -EINVAL; 2763 - card->iovnr.min = pl.level.min; 2764 - card->iovnr.init = pl.level.init; 2765 - card->iovnr.max = pl.level.max; 2766 - break; 2608 + case NS_BUFTYPE_IOVEC: 2609 + if (pl.level.max > TOP_IOVB) 2610 + return -EINVAL; 2611 + card->iovnr.min = pl.level.min; 2612 + card->iovnr.init = pl.level.init; 2613 + card->iovnr.max = pl.level.max; 2614 + break; 2767 2615 2768 - default: 2769 - return -EINVAL; 2616 + default: 2617 + return -EINVAL; 2770 2618 2771 - } 2772 - return 0; 2619 + } 2620 + return 0; 2773 2621 2774 - case NS_ADJBUFLEV: 2775 - if (!capable(CAP_NET_ADMIN)) 2776 - return -EPERM; 2777 - btype = (long) arg; /* a long is the same size as a pointer or bigger */ 2778 - switch (btype) 2779 - { 2780 - case NS_BUFTYPE_SMALL: 2781 - while (card->sbfqc < card->sbnr.init) 2782 - { 2783 - struct sk_buff *sb; 2622 + case NS_ADJBUFLEV: 2623 + if (!capable(CAP_NET_ADMIN)) 2624 + return -EPERM; 2625 + btype = (long)arg; /* a long is the same size as a pointer or bigger */ 2626 + switch (btype) { 2627 + case NS_BUFTYPE_SMALL: 2628 + while (card->sbfqc < card->sbnr.init) { 2629 + struct sk_buff *sb; 2784 2630 2785 - sb = 
__dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); 2786 - if (sb == NULL) 2787 - return -ENOMEM; 2788 - NS_SKB_CB(sb)->buf_type = BUF_SM; 2789 - skb_queue_tail(&card->sbpool.queue, sb); 2790 - skb_reserve(sb, NS_AAL0_HEADER); 2791 - push_rxbufs(card, sb); 2792 - } 2793 - break; 2631 + sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); 2632 + if (sb == NULL) 2633 + return -ENOMEM; 2634 + NS_SKB_CB(sb)->buf_type = BUF_SM; 2635 + skb_queue_tail(&card->sbpool.queue, sb); 2636 + skb_reserve(sb, NS_AAL0_HEADER); 2637 + push_rxbufs(card, sb); 2638 + } 2639 + break; 2794 2640 2795 - case NS_BUFTYPE_LARGE: 2796 - while (card->lbfqc < card->lbnr.init) 2797 - { 2798 - struct sk_buff *lb; 2641 + case NS_BUFTYPE_LARGE: 2642 + while (card->lbfqc < card->lbnr.init) { 2643 + struct sk_buff *lb; 2799 2644 2800 - lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); 2801 - if (lb == NULL) 2802 - return -ENOMEM; 2803 - NS_SKB_CB(lb)->buf_type = BUF_LG; 2804 - skb_queue_tail(&card->lbpool.queue, lb); 2805 - skb_reserve(lb, NS_SMBUFSIZE); 2806 - push_rxbufs(card, lb); 2807 - } 2808 - break; 2645 + lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); 2646 + if (lb == NULL) 2647 + return -ENOMEM; 2648 + NS_SKB_CB(lb)->buf_type = BUF_LG; 2649 + skb_queue_tail(&card->lbpool.queue, lb); 2650 + skb_reserve(lb, NS_SMBUFSIZE); 2651 + push_rxbufs(card, lb); 2652 + } 2653 + break; 2809 2654 2810 - case NS_BUFTYPE_HUGE: 2811 - while (card->hbpool.count > card->hbnr.init) 2812 - { 2813 - struct sk_buff *hb; 2655 + case NS_BUFTYPE_HUGE: 2656 + while (card->hbpool.count > card->hbnr.init) { 2657 + struct sk_buff *hb; 2814 2658 2815 - spin_lock_irqsave(&card->int_lock, flags); 2816 - hb = skb_dequeue(&card->hbpool.queue); 2817 - card->hbpool.count--; 2818 - spin_unlock_irqrestore(&card->int_lock, flags); 2819 - if (hb == NULL) 2820 - printk("nicstar%d: huge buffer count inconsistent.\n", 2821 - card->index); 2822 - else 2823 - dev_kfree_skb_any(hb); 2824 - 2825 - } 2826 - while (card->hbpool.count < card->hbnr.init) 2827 
- { 2828 - struct sk_buff *hb; 2659 + spin_lock_irqsave(&card->int_lock, flags); 2660 + hb = skb_dequeue(&card->hbpool.queue); 2661 + card->hbpool.count--; 2662 + spin_unlock_irqrestore(&card->int_lock, flags); 2663 + if (hb == NULL) 2664 + printk 2665 + ("nicstar%d: huge buffer count inconsistent.\n", 2666 + card->index); 2667 + else 2668 + dev_kfree_skb_any(hb); 2829 2669 2830 - hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); 2831 - if (hb == NULL) 2832 - return -ENOMEM; 2833 - NS_SKB_CB(hb)->buf_type = BUF_NONE; 2834 - spin_lock_irqsave(&card->int_lock, flags); 2835 - skb_queue_tail(&card->hbpool.queue, hb); 2836 - card->hbpool.count++; 2837 - spin_unlock_irqrestore(&card->int_lock, flags); 2838 - } 2839 - break; 2670 + } 2671 + while (card->hbpool.count < card->hbnr.init) { 2672 + struct sk_buff *hb; 2840 2673 2841 - case NS_BUFTYPE_IOVEC: 2842 - while (card->iovpool.count > card->iovnr.init) 2843 - { 2844 - struct sk_buff *iovb; 2674 + hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); 2675 + if (hb == NULL) 2676 + return -ENOMEM; 2677 + NS_SKB_CB(hb)->buf_type = BUF_NONE; 2678 + spin_lock_irqsave(&card->int_lock, flags); 2679 + skb_queue_tail(&card->hbpool.queue, hb); 2680 + card->hbpool.count++; 2681 + spin_unlock_irqrestore(&card->int_lock, flags); 2682 + } 2683 + break; 2845 2684 2846 - spin_lock_irqsave(&card->int_lock, flags); 2847 - iovb = skb_dequeue(&card->iovpool.queue); 2848 - card->iovpool.count--; 2849 - spin_unlock_irqrestore(&card->int_lock, flags); 2850 - if (iovb == NULL) 2851 - printk("nicstar%d: iovec buffer count inconsistent.\n", 2852 - card->index); 2853 - else 2854 - dev_kfree_skb_any(iovb); 2685 + case NS_BUFTYPE_IOVEC: 2686 + while (card->iovpool.count > card->iovnr.init) { 2687 + struct sk_buff *iovb; 2855 2688 2856 - } 2857 - while (card->iovpool.count < card->iovnr.init) 2858 - { 2859 - struct sk_buff *iovb; 2689 + spin_lock_irqsave(&card->int_lock, flags); 2690 + iovb = skb_dequeue(&card->iovpool.queue); 2691 + card->iovpool.count--; 
2692 + spin_unlock_irqrestore(&card->int_lock, flags); 2693 + if (iovb == NULL) 2694 + printk 2695 + ("nicstar%d: iovec buffer count inconsistent.\n", 2696 + card->index); 2697 + else 2698 + dev_kfree_skb_any(iovb); 2860 2699 2861 - iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); 2862 - if (iovb == NULL) 2863 - return -ENOMEM; 2864 - NS_SKB_CB(iovb)->buf_type = BUF_NONE; 2865 - spin_lock_irqsave(&card->int_lock, flags); 2866 - skb_queue_tail(&card->iovpool.queue, iovb); 2867 - card->iovpool.count++; 2868 - spin_unlock_irqrestore(&card->int_lock, flags); 2869 - } 2870 - break; 2700 + } 2701 + while (card->iovpool.count < card->iovnr.init) { 2702 + struct sk_buff *iovb; 2871 2703 2872 - default: 2873 - return -EINVAL; 2704 + iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); 2705 + if (iovb == NULL) 2706 + return -ENOMEM; 2707 + NS_SKB_CB(iovb)->buf_type = BUF_NONE; 2708 + spin_lock_irqsave(&card->int_lock, flags); 2709 + skb_queue_tail(&card->iovpool.queue, iovb); 2710 + card->iovpool.count++; 2711 + spin_unlock_irqrestore(&card->int_lock, flags); 2712 + } 2713 + break; 2874 2714 2875 - } 2876 - return 0; 2715 + default: 2716 + return -EINVAL; 2877 2717 2878 - default: 2879 - if (dev->phy && dev->phy->ioctl) { 2880 - return dev->phy->ioctl(dev, cmd, arg); 2881 - } 2882 - else { 2883 - printk("nicstar%d: %s == NULL \n", card->index, 2884 - dev->phy ? "dev->phy->ioctl" : "dev->phy"); 2885 - return -ENOIOCTLCMD; 2886 - } 2887 - } 2718 + } 2719 + return 0; 2720 + 2721 + default: 2722 + if (dev->phy && dev->phy->ioctl) { 2723 + return dev->phy->ioctl(dev, cmd, arg); 2724 + } else { 2725 + printk("nicstar%d: %s == NULL \n", card->index, 2726 + dev->phy ? 
"dev->phy->ioctl" : "dev->phy"); 2727 + return -ENOIOCTLCMD; 2728 + } 2729 + } 2888 2730 } 2889 2731 2890 - 2891 - static void which_list(ns_dev *card, struct sk_buff *skb) 2732 + static void which_list(ns_dev * card, struct sk_buff *skb) 2892 2733 { 2893 2734 printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type); 2894 2735 } 2895 2736 2896 - 2897 2737 static void ns_poll(unsigned long arg) 2898 2738 { 2899 - int i; 2900 - ns_dev *card; 2901 - unsigned long flags; 2902 - u32 stat_r, stat_w; 2739 + int i; 2740 + ns_dev *card; 2741 + unsigned long flags; 2742 + u32 stat_r, stat_w; 2903 2743 2904 - PRINTK("nicstar: Entering ns_poll().\n"); 2905 - for (i = 0; i < num_cards; i++) 2906 - { 2907 - card = cards[i]; 2908 - if (spin_is_locked(&card->int_lock)) { 2909 - /* Probably it isn't worth spinning */ 2910 - continue; 2911 - } 2912 - spin_lock_irqsave(&card->int_lock, flags); 2744 + PRINTK("nicstar: Entering ns_poll().\n"); 2745 + for (i = 0; i < num_cards; i++) { 2746 + card = cards[i]; 2747 + if (spin_is_locked(&card->int_lock)) { 2748 + /* Probably it isn't worth spinning */ 2749 + continue; 2750 + } 2751 + spin_lock_irqsave(&card->int_lock, flags); 2913 2752 2914 - stat_w = 0; 2915 - stat_r = readl(card->membase + STAT); 2916 - if (stat_r & NS_STAT_TSIF) 2917 - stat_w |= NS_STAT_TSIF; 2918 - if (stat_r & NS_STAT_EOPDU) 2919 - stat_w |= NS_STAT_EOPDU; 2753 + stat_w = 0; 2754 + stat_r = readl(card->membase + STAT); 2755 + if (stat_r & NS_STAT_TSIF) 2756 + stat_w |= NS_STAT_TSIF; 2757 + if (stat_r & NS_STAT_EOPDU) 2758 + stat_w |= NS_STAT_EOPDU; 2920 2759 2921 - process_tsq(card); 2922 - process_rsq(card); 2760 + process_tsq(card); 2761 + process_rsq(card); 2923 2762 2924 - writel(stat_w, card->membase + STAT); 2925 - spin_unlock_irqrestore(&card->int_lock, flags); 2926 - } 2927 - mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD); 2928 - PRINTK("nicstar: Leaving ns_poll().\n"); 2763 + writel(stat_w, card->membase + STAT); 2764 + 
spin_unlock_irqrestore(&card->int_lock, flags); 2765 + } 2766 + mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD); 2767 + PRINTK("nicstar: Leaving ns_poll().\n"); 2929 2768 } 2930 - 2931 - 2932 2769 2933 2770 static int ns_parse_mac(char *mac, unsigned char *esi) 2934 2771 { 2935 - int i, j; 2936 - short byte1, byte0; 2772 + int i, j; 2773 + short byte1, byte0; 2937 2774 2938 - if (mac == NULL || esi == NULL) 2939 - return -1; 2940 - j = 0; 2941 - for (i = 0; i < 6; i++) 2942 - { 2943 - if ((byte1 = ns_h2i(mac[j++])) < 0) 2944 - return -1; 2945 - if ((byte0 = ns_h2i(mac[j++])) < 0) 2946 - return -1; 2947 - esi[i] = (unsigned char) (byte1 * 16 + byte0); 2948 - if (i < 5) 2949 - { 2950 - if (mac[j++] != ':') 2951 - return -1; 2952 - } 2953 - } 2954 - return 0; 2775 + if (mac == NULL || esi == NULL) 2776 + return -1; 2777 + j = 0; 2778 + for (i = 0; i < 6; i++) { 2779 + if ((byte1 = ns_h2i(mac[j++])) < 0) 2780 + return -1; 2781 + if ((byte0 = ns_h2i(mac[j++])) < 0) 2782 + return -1; 2783 + esi[i] = (unsigned char)(byte1 * 16 + byte0); 2784 + if (i < 5) { 2785 + if (mac[j++] != ':') 2786 + return -1; 2787 + } 2788 + } 2789 + return 0; 2955 2790 } 2956 - 2957 - 2958 2791 2959 2792 static short ns_h2i(char c) 2960 2793 { 2961 - if (c >= '0' && c <= '9') 2962 - return (short) (c - '0'); 2963 - if (c >= 'A' && c <= 'F') 2964 - return (short) (c - 'A' + 10); 2965 - if (c >= 'a' && c <= 'f') 2966 - return (short) (c - 'a' + 10); 2967 - return -1; 2794 + if (c >= '0' && c <= '9') 2795 + return (short)(c - '0'); 2796 + if (c >= 'A' && c <= 'F') 2797 + return (short)(c - 'A' + 10); 2798 + if (c >= 'a' && c <= 'f') 2799 + return (short)(c - 'a' + 10); 2800 + return -1; 2968 2801 } 2969 - 2970 - 2971 2802 2972 2803 static void ns_phy_put(struct atm_dev *dev, unsigned char value, 2973 - unsigned long addr) 2804 + unsigned long addr) 2974 2805 { 2975 - ns_dev *card; 2976 - unsigned long flags; 2806 + ns_dev *card; 2807 + unsigned long flags; 2977 2808 2978 - card = dev->dev_data; 2979 
- spin_lock_irqsave(&card->res_lock, flags); 2980 - while(CMD_BUSY(card)); 2981 - writel((unsigned long) value, card->membase + DR0); 2982 - writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF), 2983 - card->membase + CMD); 2984 - spin_unlock_irqrestore(&card->res_lock, flags); 2809 + card = dev->dev_data; 2810 + spin_lock_irqsave(&card->res_lock, flags); 2811 + while (CMD_BUSY(card)) ; 2812 + writel((unsigned long)value, card->membase + DR0); 2813 + writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF), 2814 + card->membase + CMD); 2815 + spin_unlock_irqrestore(&card->res_lock, flags); 2985 2816 } 2986 - 2987 - 2988 2817 2989 2818 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr) 2990 2819 { 2991 - ns_dev *card; 2992 - unsigned long flags; 2993 - unsigned long data; 2820 + ns_dev *card; 2821 + unsigned long flags; 2822 + unsigned long data; 2994 2823 2995 - card = dev->dev_data; 2996 - spin_lock_irqsave(&card->res_lock, flags); 2997 - while(CMD_BUSY(card)); 2998 - writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF), 2999 - card->membase + CMD); 3000 - while(CMD_BUSY(card)); 3001 - data = readl(card->membase + DR0) & 0x000000FF; 3002 - spin_unlock_irqrestore(&card->res_lock, flags); 3003 - return (unsigned char) data; 2824 + card = dev->dev_data; 2825 + spin_lock_irqsave(&card->res_lock, flags); 2826 + while (CMD_BUSY(card)) ; 2827 + writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF), 2828 + card->membase + CMD); 2829 + while (CMD_BUSY(card)) ; 2830 + data = readl(card->membase + DR0) & 0x000000FF; 2831 + spin_unlock_irqrestore(&card->res_lock, flags); 2832 + return (unsigned char)data; 3004 2833 } 3005 - 3006 - 3007 2834 3008 2835 module_init(nicstar_init); 3009 2836 module_exit(nicstar_cleanup);
+253 -321
drivers/atm/nicstar.h
··· 1 - /****************************************************************************** 2 - * 1 + /* 3 2 * nicstar.h 4 3 * 5 4 * Header file for the nicstar device driver. ··· 7 8 * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999 8 9 * 9 10 * (C) INESC 1998 10 - * 11 - ******************************************************************************/ 12 - 11 + */ 13 12 14 13 #ifndef _LINUX_NICSTAR_H_ 15 14 #define _LINUX_NICSTAR_H_ 16 15 17 - 18 - /* Includes *******************************************************************/ 16 + /* Includes */ 19 17 20 18 #include <linux/types.h> 21 19 #include <linux/pci.h> ··· 21 25 #include <linux/atmdev.h> 22 26 #include <linux/atm_nicstar.h> 23 27 24 - 25 - /* Options ********************************************************************/ 28 + /* Options */ 26 29 27 30 #define NS_MAX_CARDS 4 /* Maximum number of NICStAR based cards 28 31 controlled by the device driver. Must 29 - be <= 5 */ 32 + be <= 5 */ 30 33 31 34 #undef RCQ_SUPPORT /* Do not define this for now */ 32 35 ··· 38 43 #define NS_VPIBITS 2 /* 0, 1, 2, or 8 */ 39 44 40 45 #define NS_MAX_RCTSIZE 4096 /* Number of entries. 4096 or 16384. 41 - Define 4096 only if (all) your card(s) 46 + Define 4096 only if (all) your card(s) 42 47 have 32K x 32bit SRAM, in which case 43 48 setting this to 16384 will just waste a 44 49 lot of memory. ··· 46 51 128K x 32bit SRAM will limit the maximum 47 52 VCI. 
*/ 48 53 49 - /*#define NS_PCI_LATENCY 64*/ /* Must be a multiple of 32 */ 54 + /*#define NS_PCI_LATENCY 64*//* Must be a multiple of 32 */ 50 55 51 56 /* Number of buffers initially allocated */ 52 - #define NUM_SB 32 /* Must be even */ 53 - #define NUM_LB 24 /* Must be even */ 54 - #define NUM_HB 8 /* Pre-allocated huge buffers */ 55 - #define NUM_IOVB 48 /* Iovec buffers */ 57 + #define NUM_SB 32 /* Must be even */ 58 + #define NUM_LB 24 /* Must be even */ 59 + #define NUM_HB 8 /* Pre-allocated huge buffers */ 60 + #define NUM_IOVB 48 /* Iovec buffers */ 56 61 57 62 /* Lower level for count of buffers */ 58 - #define MIN_SB 8 /* Must be even */ 59 - #define MIN_LB 8 /* Must be even */ 63 + #define MIN_SB 8 /* Must be even */ 64 + #define MIN_LB 8 /* Must be even */ 60 65 #define MIN_HB 6 61 66 #define MIN_IOVB 8 62 67 63 68 /* Upper level for count of buffers */ 64 - #define MAX_SB 64 /* Must be even, <= 508 */ 65 - #define MAX_LB 48 /* Must be even, <= 508 */ 69 + #define MAX_SB 64 /* Must be even, <= 508 */ 70 + #define MAX_LB 48 /* Must be even, <= 508 */ 66 71 #define MAX_HB 10 67 72 #define MAX_IOVB 80 68 73 69 74 /* These are the absolute maximum allowed for the ioctl() */ 70 - #define TOP_SB 256 /* Must be even, <= 508 */ 71 - #define TOP_LB 128 /* Must be even, <= 508 */ 75 + #define TOP_SB 256 /* Must be even, <= 508 */ 76 + #define TOP_LB 128 /* Must be even, <= 508 */ 72 77 #define TOP_HB 64 73 78 #define TOP_IOVB 256 74 - 75 79 76 80 #define MAX_TBD_PER_VC 1 /* Number of TBDs before a TSR */ 77 81 #define MAX_TBD_PER_SCQ 10 /* Only meaningful for variable rate SCQs */ ··· 83 89 84 90 #define PCR_TOLERANCE (1.0001) 85 91 86 - 87 - 88 - /* ESI stuff ******************************************************************/ 92 + /* ESI stuff */ 89 93 90 94 #define NICSTAR_EPROM_MAC_ADDR_OFFSET 0x6C 91 95 #define NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT 0xF6 92 96 93 - 94 - /* #defines *******************************************************************/ 97 + /* 
#defines */ 95 98 96 99 #define NS_IOREMAP_SIZE 4096 97 100 ··· 114 123 #define NS_SMSKBSIZE (NS_SMBUFSIZE + NS_AAL0_HEADER) 115 124 #define NS_LGSKBSIZE (NS_SMBUFSIZE + NS_LGBUFSIZE) 116 125 126 + /* NICStAR structures located in host memory */ 117 127 118 - /* NICStAR structures located in host memory **********************************/ 119 - 120 - 121 - 122 - /* RSQ - Receive Status Queue 128 + /* 129 + * RSQ - Receive Status Queue 123 130 * 124 131 * Written by the NICStAR, read by the device driver. 125 132 */ 126 133 127 - typedef struct ns_rsqe 128 - { 129 - u32 word_1; 130 - u32 buffer_handle; 131 - u32 final_aal5_crc32; 132 - u32 word_4; 134 + typedef struct ns_rsqe { 135 + u32 word_1; 136 + u32 buffer_handle; 137 + u32 final_aal5_crc32; 138 + u32 word_4; 133 139 } ns_rsqe; 134 140 135 141 #define ns_rsqe_vpi(ns_rsqep) \ ··· 163 175 #define ns_rsqe_cellcount(ns_rsqep) \ 164 176 (le32_to_cpu((ns_rsqep)->word_4) & 0x000001FF) 165 177 #define ns_rsqe_init(ns_rsqep) \ 166 - ((ns_rsqep)->word_4 = cpu_to_le32(0x00000000)) 178 + ((ns_rsqep)->word_4 = cpu_to_le32(0x00000000)) 167 179 168 180 #define NS_RSQ_NUM_ENTRIES (NS_RSQSIZE / 16) 169 181 #define NS_RSQ_ALIGNMENT NS_RSQSIZE 170 182 171 - 172 - 173 - /* RCQ - Raw Cell Queue 183 + /* 184 + * RCQ - Raw Cell Queue 174 185 * 175 186 * Written by the NICStAR, read by the device driver. 
176 187 */ 177 188 178 - typedef struct cell_payload 179 - { 180 - u32 word[12]; 189 + typedef struct cell_payload { 190 + u32 word[12]; 181 191 } cell_payload; 182 192 183 - typedef struct ns_rcqe 184 - { 185 - u32 word_1; 186 - u32 word_2; 187 - u32 word_3; 188 - u32 word_4; 189 - cell_payload payload; 193 + typedef struct ns_rcqe { 194 + u32 word_1; 195 + u32 word_2; 196 + u32 word_3; 197 + u32 word_4; 198 + cell_payload payload; 190 199 } ns_rcqe; 191 200 192 201 #define NS_RCQE_SIZE 64 /* bytes */ ··· 195 210 #define ns_rcqe_nextbufhandle(ns_rcqep) \ 196 211 (le32_to_cpu((ns_rcqep)->word_2)) 197 212 198 - 199 - 200 - /* SCQ - Segmentation Channel Queue 213 + /* 214 + * SCQ - Segmentation Channel Queue 201 215 * 202 216 * Written by the device driver, read by the NICStAR. 203 217 */ 204 218 205 - typedef struct ns_scqe 206 - { 207 - u32 word_1; 208 - u32 word_2; 209 - u32 word_3; 210 - u32 word_4; 219 + typedef struct ns_scqe { 220 + u32 word_1; 221 + u32 word_2; 222 + u32 word_3; 223 + u32 word_4; 211 224 } ns_scqe; 212 225 213 226 /* NOTE: SCQ entries can be either a TBD (Transmit Buffer Descriptors) 214 - or TSR (Transmit Status Requests) */ 227 + or TSR (Transmit Status Requests) */ 215 228 216 229 #define NS_SCQE_TYPE_TBD 0x00000000 217 230 #define NS_SCQE_TYPE_TSR 0x80000000 218 - 219 231 220 232 #define NS_TBD_EOPDU 0x40000000 221 233 #define NS_TBD_AAL0 0x00000000 ··· 235 253 #define ns_tbd_mkword_4(gfc, vpi, vci, pt, clp) \ 236 254 (cpu_to_le32((gfc) << 28 | (vpi) << 20 | (vci) << 4 | (pt) << 1 | (clp))) 237 255 238 - 239 256 #define NS_TSR_INTENABLE 0x20000000 240 257 241 - #define NS_TSR_SCDISVBR 0xFFFF /* Use as scdi for VBR SCD */ 258 + #define NS_TSR_SCDISVBR 0xFFFF /* Use as scdi for VBR SCD */ 242 259 243 260 #define ns_tsr_mkword_1(flags) \ 244 261 (cpu_to_le32(NS_SCQE_TYPE_TSR | (flags))) ··· 254 273 255 274 #define NS_SCQE_SIZE 16 256 275 257 - 258 - 259 - /* TSQ - Transmit Status Queue 276 + /* 277 + * TSQ - Transmit Status Queue 260 278 * 
261 279 * Written by the NICStAR, read by the device driver. 262 280 */ 263 281 264 - typedef struct ns_tsi 265 - { 266 - u32 word_1; 267 - u32 word_2; 282 + typedef struct ns_tsi { 283 + u32 word_1; 284 + u32 word_2; 268 285 } ns_tsi; 269 286 270 287 /* NOTE: The first word can be a status word copied from the TSR which 271 - originated the TSI, or a timer overflow indicator. In this last 272 - case, the value of the first word is all zeroes. */ 288 + originated the TSI, or a timer overflow indicator. In this last 289 + case, the value of the first word is all zeroes. */ 273 290 274 291 #define NS_TSI_EMPTY 0x80000000 275 292 #define NS_TSI_TIMESTAMP_MASK 0x00FFFFFF ··· 280 301 #define ns_tsi_init(ns_tsip) \ 281 302 ((ns_tsip)->word_2 = cpu_to_le32(NS_TSI_EMPTY)) 282 303 283 - 284 304 #define NS_TSQSIZE 8192 285 305 #define NS_TSQ_NUM_ENTRIES 1024 286 306 #define NS_TSQ_ALIGNMENT 8192 287 - 288 307 289 308 #define NS_TSI_SCDISVBR NS_TSR_SCDISVBR 290 309 ··· 293 316 #define ns_tsi_getscqpos(ns_tsip) \ 294 317 (le32_to_cpu((ns_tsip)->word_1) & 0x00007FFF) 295 318 319 + /* NICStAR structures located in local SRAM */ 296 320 297 - 298 - /* NICStAR structures located in local SRAM ***********************************/ 299 - 300 - 301 - 302 - /* RCT - Receive Connection Table 321 + /* 322 + * RCT - Receive Connection Table 303 323 * 304 324 * Written by both the NICStAR and the device driver. 305 325 */ 306 326 307 - typedef struct ns_rcte 308 - { 309 - u32 word_1; 310 - u32 buffer_handle; 311 - u32 dma_address; 312 - u32 aal5_crc32; 327 + typedef struct ns_rcte { 328 + u32 word_1; 329 + u32 buffer_handle; 330 + u32 dma_address; 331 + u32 aal5_crc32; 313 332 } ns_rcte; 314 333 315 - #define NS_RCTE_BSFB 0x00200000 /* Rev. D only */ 334 + #define NS_RCTE_BSFB 0x00200000 /* Rev. 
D only */ 316 335 #define NS_RCTE_NZGFC 0x00100000 317 336 #define NS_RCTE_CONNECTOPEN 0x00080000 318 337 #define NS_RCTE_AALMASK 0x00070000 ··· 331 358 #define NS_RCT_ENTRY_SIZE 4 /* Number of dwords */ 332 359 333 360 /* NOTE: We could make macros to contruct the first word of the RCTE, 334 - but that doesn't seem to make much sense... */ 361 + but that doesn't seem to make much sense... */ 335 362 336 - 337 - 338 - /* FBD - Free Buffer Descriptor 363 + /* 364 + * FBD - Free Buffer Descriptor 339 365 * 340 366 * Written by the device driver using via the command register. 341 367 */ 342 368 343 - typedef struct ns_fbd 344 - { 345 - u32 buffer_handle; 346 - u32 dma_address; 369 + typedef struct ns_fbd { 370 + u32 buffer_handle; 371 + u32 dma_address; 347 372 } ns_fbd; 348 373 349 - 350 - 351 - 352 - /* TST - Transmit Schedule Table 374 + /* 375 + * TST - Transmit Schedule Table 353 376 * 354 377 * Written by the device driver. 355 378 */ ··· 354 385 355 386 #define NS_TST_OPCODE_MASK 0x60000000 356 387 357 - #define NS_TST_OPCODE_NULL 0x00000000 /* Insert null cell */ 358 - #define NS_TST_OPCODE_FIXED 0x20000000 /* Cell from a fixed rate channel */ 388 + #define NS_TST_OPCODE_NULL 0x00000000 /* Insert null cell */ 389 + #define NS_TST_OPCODE_FIXED 0x20000000 /* Cell from a fixed rate channel */ 359 390 #define NS_TST_OPCODE_VARIABLE 0x40000000 360 - #define NS_TST_OPCODE_END 0x60000000 /* Jump */ 391 + #define NS_TST_OPCODE_END 0x60000000 /* Jump */ 361 392 362 393 #define ns_tste_make(opcode, sramad) (opcode | sramad) 363 394 364 395 /* NOTE: 365 396 366 397 - When the opcode is FIXED, sramad specifies the SRAM address of the 367 - SCD for that fixed rate channel. 398 + SCD for that fixed rate channel. 368 399 - When the opcode is END, sramad specifies the SRAM address of the 369 - location of the next TST entry to read. 400 + location of the next TST entry to read. 
370 401 */ 371 402 372 - 373 - 374 - /* SCD - Segmentation Channel Descriptor 403 + /* 404 + * SCD - Segmentation Channel Descriptor 375 405 * 376 406 * Written by both the device driver and the NICStAR 377 407 */ 378 408 379 - typedef struct ns_scd 380 - { 381 - u32 word_1; 382 - u32 word_2; 383 - u32 partial_aal5_crc; 384 - u32 reserved; 385 - ns_scqe cache_a; 386 - ns_scqe cache_b; 409 + typedef struct ns_scd { 410 + u32 word_1; 411 + u32 word_2; 412 + u32 partial_aal5_crc; 413 + u32 reserved; 414 + ns_scqe cache_a; 415 + ns_scqe cache_b; 387 416 } ns_scd; 388 417 389 - #define NS_SCD_BASE_MASK_VAR 0xFFFFE000 /* Variable rate */ 390 - #define NS_SCD_BASE_MASK_FIX 0xFFFFFC00 /* Fixed rate */ 418 + #define NS_SCD_BASE_MASK_VAR 0xFFFFE000 /* Variable rate */ 419 + #define NS_SCD_BASE_MASK_FIX 0xFFFFFC00 /* Fixed rate */ 391 420 #define NS_SCD_TAIL_MASK_VAR 0x00001FF0 392 421 #define NS_SCD_TAIL_MASK_FIX 0x000003F0 393 422 #define NS_SCD_HEAD_MASK_VAR 0x00001FF0 ··· 393 426 #define NS_SCD_XMITFOREVER 0x02000000 394 427 395 428 /* NOTE: There are other fields in word 2 of the SCD, but as they should 396 - not be needed in the device driver they are not defined here. */ 429 + not be needed in the device driver they are not defined here. 
*/ 397 430 398 - 399 - 400 - 401 - /* NICStAR local SRAM memory map **********************************************/ 402 - 431 + /* NICStAR local SRAM memory map */ 403 432 404 433 #define NS_RCT 0x00000 405 434 #define NS_RCT_32_END 0x03FFF ··· 418 455 #define NS_LGFBQ 0x1FC00 419 456 #define NS_LGFBQ_END 0x1FFFF 420 457 421 - 422 - 423 - /* NISCtAR operation registers ************************************************/ 424 - 458 + /* NISCtAR operation registers */ 425 459 426 460 /* See Section 3.4 of `IDT77211 NICStAR User Manual' from www.idt.com */ 427 461 428 - enum ns_regs 429 - { 430 - DR0 = 0x00, /* Data Register 0 R/W*/ 431 - DR1 = 0x04, /* Data Register 1 W */ 432 - DR2 = 0x08, /* Data Register 2 W */ 433 - DR3 = 0x0C, /* Data Register 3 W */ 434 - CMD = 0x10, /* Command W */ 435 - CFG = 0x14, /* Configuration R/W */ 436 - STAT = 0x18, /* Status R/W */ 437 - RSQB = 0x1C, /* Receive Status Queue Base W */ 438 - RSQT = 0x20, /* Receive Status Queue Tail R */ 439 - RSQH = 0x24, /* Receive Status Queue Head W */ 440 - CDC = 0x28, /* Cell Drop Counter R/clear */ 441 - VPEC = 0x2C, /* VPI/VCI Lookup Error Count R/clear */ 442 - ICC = 0x30, /* Invalid Cell Count R/clear */ 443 - RAWCT = 0x34, /* Raw Cell Tail R */ 444 - TMR = 0x38, /* Timer R */ 445 - TSTB = 0x3C, /* Transmit Schedule Table Base R/W */ 446 - TSQB = 0x40, /* Transmit Status Queue Base W */ 447 - TSQT = 0x44, /* Transmit Status Queue Tail R */ 448 - TSQH = 0x48, /* Transmit Status Queue Head W */ 449 - GP = 0x4C, /* General Purpose R/W */ 450 - VPM = 0x50 /* VPI/VCI Mask W */ 462 + enum ns_regs { 463 + DR0 = 0x00, /* Data Register 0 R/W */ 464 + DR1 = 0x04, /* Data Register 1 W */ 465 + DR2 = 0x08, /* Data Register 2 W */ 466 + DR3 = 0x0C, /* Data Register 3 W */ 467 + CMD = 0x10, /* Command W */ 468 + CFG = 0x14, /* Configuration R/W */ 469 + STAT = 0x18, /* Status R/W */ 470 + RSQB = 0x1C, /* Receive Status Queue Base W */ 471 + RSQT = 0x20, /* Receive Status Queue Tail R */ 472 + RSQH = 0x24, /* 
Receive Status Queue Head W */ 473 + CDC = 0x28, /* Cell Drop Counter R/clear */ 474 + VPEC = 0x2C, /* VPI/VCI Lookup Error Count R/clear */ 475 + ICC = 0x30, /* Invalid Cell Count R/clear */ 476 + RAWCT = 0x34, /* Raw Cell Tail R */ 477 + TMR = 0x38, /* Timer R */ 478 + TSTB = 0x3C, /* Transmit Schedule Table Base R/W */ 479 + TSQB = 0x40, /* Transmit Status Queue Base W */ 480 + TSQT = 0x44, /* Transmit Status Queue Tail R */ 481 + TSQH = 0x48, /* Transmit Status Queue Head W */ 482 + GP = 0x4C, /* General Purpose R/W */ 483 + VPM = 0x50 /* VPI/VCI Mask W */ 451 484 }; 452 485 453 - 454 - /* NICStAR commands issued to the CMD register ********************************/ 455 - 486 + /* NICStAR commands issued to the CMD register */ 456 487 457 488 /* Top 4 bits are command opcode, lower 28 are parameters. */ 458 489 459 490 #define NS_CMD_NO_OPERATION 0x00000000 460 - /* params always 0 */ 491 + /* params always 0 */ 461 492 462 493 #define NS_CMD_OPENCLOSE_CONNECTION 0x20000000 463 - /* b19{1=open,0=close} b18-2{SRAM addr} */ 494 + /* b19{1=open,0=close} b18-2{SRAM addr} */ 464 495 465 496 #define NS_CMD_WRITE_SRAM 0x40000000 466 - /* b18-2{SRAM addr} b1-0{burst size} */ 497 + /* b18-2{SRAM addr} b1-0{burst size} */ 467 498 468 499 #define NS_CMD_READ_SRAM 0x50000000 469 - /* b18-2{SRAM addr} */ 500 + /* b18-2{SRAM addr} */ 470 501 471 502 #define NS_CMD_WRITE_FREEBUFQ 0x60000000 472 - /* b0{large buf indicator} */ 503 + /* b0{large buf indicator} */ 473 504 474 505 #define NS_CMD_READ_UTILITY 0x80000000 475 - /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */ 506 + /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */ 476 507 477 508 #define NS_CMD_WRITE_UTILITY 0x90000000 478 - /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */ 509 + /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */ 479 510 480 511 #define NS_CMD_OPEN_CONNECTION (NS_CMD_OPENCLOSE_CONNECTION | 0x00080000) 481 512 #define NS_CMD_CLOSE_CONNECTION 
NS_CMD_OPENCLOSE_CONNECTION 482 513 514 + /* NICStAR configuration bits */ 483 515 484 - /* NICStAR configuration bits *************************************************/ 485 - 486 - #define NS_CFG_SWRST 0x80000000 /* Software Reset */ 487 - #define NS_CFG_RXPATH 0x20000000 /* Receive Path Enable */ 488 - #define NS_CFG_SMBUFSIZE_MASK 0x18000000 /* Small Receive Buffer Size */ 489 - #define NS_CFG_LGBUFSIZE_MASK 0x06000000 /* Large Receive Buffer Size */ 490 - #define NS_CFG_EFBIE 0x01000000 /* Empty Free Buffer Queue 491 - Interrupt Enable */ 492 - #define NS_CFG_RSQSIZE_MASK 0x00C00000 /* Receive Status Queue Size */ 493 - #define NS_CFG_ICACCEPT 0x00200000 /* Invalid Cell Accept */ 494 - #define NS_CFG_IGNOREGFC 0x00100000 /* Ignore General Flow Control */ 495 - #define NS_CFG_VPIBITS_MASK 0x000C0000 /* VPI/VCI Bits Size Select */ 496 - #define NS_CFG_RCTSIZE_MASK 0x00030000 /* Receive Connection Table Size */ 497 - #define NS_CFG_VCERRACCEPT 0x00008000 /* VPI/VCI Error Cell Accept */ 498 - #define NS_CFG_RXINT_MASK 0x00007000 /* End of Receive PDU Interrupt 499 - Handling */ 500 - #define NS_CFG_RAWIE 0x00000800 /* Raw Cell Qu' Interrupt Enable */ 501 - #define NS_CFG_RSQAFIE 0x00000400 /* Receive Queue Almost Full 502 - Interrupt Enable */ 503 - #define NS_CFG_RXRM 0x00000200 /* Receive RM Cells */ 504 - #define NS_CFG_TMRROIE 0x00000080 /* Timer Roll Over Interrupt 505 - Enable */ 506 - #define NS_CFG_TXEN 0x00000020 /* Transmit Operation Enable */ 507 - #define NS_CFG_TXIE 0x00000010 /* Transmit Status Interrupt 508 - Enable */ 509 - #define NS_CFG_TXURIE 0x00000008 /* Transmit Under-run Interrupt 510 - Enable */ 511 - #define NS_CFG_UMODE 0x00000004 /* Utopia Mode (cell/byte) Select */ 512 - #define NS_CFG_TSQFIE 0x00000002 /* Transmit Status Queue Full 513 - Interrupt Enable */ 514 - #define NS_CFG_PHYIE 0x00000001 /* PHY Interrupt Enable */ 516 + #define NS_CFG_SWRST 0x80000000 /* Software Reset */ 517 + #define NS_CFG_RXPATH 0x20000000 /* Receive Path 
Enable */ 518 + #define NS_CFG_SMBUFSIZE_MASK 0x18000000 /* Small Receive Buffer Size */ 519 + #define NS_CFG_LGBUFSIZE_MASK 0x06000000 /* Large Receive Buffer Size */ 520 + #define NS_CFG_EFBIE 0x01000000 /* Empty Free Buffer Queue 521 + Interrupt Enable */ 522 + #define NS_CFG_RSQSIZE_MASK 0x00C00000 /* Receive Status Queue Size */ 523 + #define NS_CFG_ICACCEPT 0x00200000 /* Invalid Cell Accept */ 524 + #define NS_CFG_IGNOREGFC 0x00100000 /* Ignore General Flow Control */ 525 + #define NS_CFG_VPIBITS_MASK 0x000C0000 /* VPI/VCI Bits Size Select */ 526 + #define NS_CFG_RCTSIZE_MASK 0x00030000 /* Receive Connection Table Size */ 527 + #define NS_CFG_VCERRACCEPT 0x00008000 /* VPI/VCI Error Cell Accept */ 528 + #define NS_CFG_RXINT_MASK 0x00007000 /* End of Receive PDU Interrupt 529 + Handling */ 530 + #define NS_CFG_RAWIE 0x00000800 /* Raw Cell Qu' Interrupt Enable */ 531 + #define NS_CFG_RSQAFIE 0x00000400 /* Receive Queue Almost Full 532 + Interrupt Enable */ 533 + #define NS_CFG_RXRM 0x00000200 /* Receive RM Cells */ 534 + #define NS_CFG_TMRROIE 0x00000080 /* Timer Roll Over Interrupt 535 + Enable */ 536 + #define NS_CFG_TXEN 0x00000020 /* Transmit Operation Enable */ 537 + #define NS_CFG_TXIE 0x00000010 /* Transmit Status Interrupt 538 + Enable */ 539 + #define NS_CFG_TXURIE 0x00000008 /* Transmit Under-run Interrupt 540 + Enable */ 541 + #define NS_CFG_UMODE 0x00000004 /* Utopia Mode (cell/byte) Select */ 542 + #define NS_CFG_TSQFIE 0x00000002 /* Transmit Status Queue Full 543 + Interrupt Enable */ 544 + #define NS_CFG_PHYIE 0x00000001 /* PHY Interrupt Enable */ 515 545 516 546 #define NS_CFG_SMBUFSIZE_48 0x00000000 517 547 #define NS_CFG_SMBUFSIZE_96 0x08000000 ··· 535 579 #define NS_CFG_RXINT_624US 0x00003000 536 580 #define NS_CFG_RXINT_899US 0x00004000 537 581 582 + /* NICStAR STATus bits */ 538 583 539 - /* NICStAR STATus bits ********************************************************/ 540 - 541 - #define NS_STAT_SFBQC_MASK 0xFF000000 /* hi 8 bits Small 
Buffer Queue Count */ 542 - #define NS_STAT_LFBQC_MASK 0x00FF0000 /* hi 8 bits Large Buffer Queue Count */ 543 - #define NS_STAT_TSIF 0x00008000 /* Transmit Status Queue Indicator */ 544 - #define NS_STAT_TXICP 0x00004000 /* Transmit Incomplete PDU */ 545 - #define NS_STAT_TSQF 0x00001000 /* Transmit Status Queue Full */ 546 - #define NS_STAT_TMROF 0x00000800 /* Timer Overflow */ 547 - #define NS_STAT_PHYI 0x00000400 /* PHY Device Interrupt */ 548 - #define NS_STAT_CMDBZ 0x00000200 /* Command Busy */ 549 - #define NS_STAT_SFBQF 0x00000100 /* Small Buffer Queue Full */ 550 - #define NS_STAT_LFBQF 0x00000080 /* Large Buffer Queue Full */ 551 - #define NS_STAT_RSQF 0x00000040 /* Receive Status Queue Full */ 552 - #define NS_STAT_EOPDU 0x00000020 /* End of PDU */ 553 - #define NS_STAT_RAWCF 0x00000010 /* Raw Cell Flag */ 554 - #define NS_STAT_SFBQE 0x00000008 /* Small Buffer Queue Empty */ 555 - #define NS_STAT_LFBQE 0x00000004 /* Large Buffer Queue Empty */ 556 - #define NS_STAT_RSQAF 0x00000002 /* Receive Status Queue Almost Full */ 584 + #define NS_STAT_SFBQC_MASK 0xFF000000 /* hi 8 bits Small Buffer Queue Count */ 585 + #define NS_STAT_LFBQC_MASK 0x00FF0000 /* hi 8 bits Large Buffer Queue Count */ 586 + #define NS_STAT_TSIF 0x00008000 /* Transmit Status Queue Indicator */ 587 + #define NS_STAT_TXICP 0x00004000 /* Transmit Incomplete PDU */ 588 + #define NS_STAT_TSQF 0x00001000 /* Transmit Status Queue Full */ 589 + #define NS_STAT_TMROF 0x00000800 /* Timer Overflow */ 590 + #define NS_STAT_PHYI 0x00000400 /* PHY Device Interrupt */ 591 + #define NS_STAT_CMDBZ 0x00000200 /* Command Busy */ 592 + #define NS_STAT_SFBQF 0x00000100 /* Small Buffer Queue Full */ 593 + #define NS_STAT_LFBQF 0x00000080 /* Large Buffer Queue Full */ 594 + #define NS_STAT_RSQF 0x00000040 /* Receive Status Queue Full */ 595 + #define NS_STAT_EOPDU 0x00000020 /* End of PDU */ 596 + #define NS_STAT_RAWCF 0x00000010 /* Raw Cell Flag */ 597 + #define NS_STAT_SFBQE 0x00000008 /* Small Buffer Queue 
Empty */ 598 + #define NS_STAT_LFBQE 0x00000004 /* Large Buffer Queue Empty */ 599 + #define NS_STAT_RSQAF 0x00000002 /* Receive Status Queue Almost Full */ 557 600 558 601 #define ns_stat_sfbqc_get(stat) (((stat) & NS_STAT_SFBQC_MASK) >> 23) 559 602 #define ns_stat_lfbqc_get(stat) (((stat) & NS_STAT_LFBQC_MASK) >> 15) 560 603 561 - 562 - 563 - /* #defines which depend on other #defines ************************************/ 564 - 604 + /* #defines which depend on other #defines */ 565 605 566 606 #define NS_TST0 NS_TST_FRSCD 567 607 #define NS_TST1 (NS_TST_FRSCD + NS_TST_NUM_ENTRIES + 1) ··· 624 672 #define NS_CFG_TSQFIE_OPT 0x00000000 625 673 #endif /* ENABLE_TSQFIE */ 626 674 627 - 628 - /* PCI stuff ******************************************************************/ 675 + /* PCI stuff */ 629 676 630 677 #ifndef PCI_VENDOR_ID_IDT 631 678 #define PCI_VENDOR_ID_IDT 0x111D ··· 634 683 #define PCI_DEVICE_ID_IDT_IDT77201 0x0001 635 684 #endif /* PCI_DEVICE_ID_IDT_IDT77201 */ 636 685 637 - 638 - 639 - /* Device driver structures ***************************************************/ 640 - 686 + /* Device driver structures */ 641 687 642 688 struct ns_skb_cb { 643 - u32 buf_type; /* BUF_SM/BUF_LG/BUF_NONE */ 689 + u32 buf_type; /* BUF_SM/BUF_LG/BUF_NONE */ 644 690 }; 645 691 646 692 #define NS_SKB_CB(skb) ((struct ns_skb_cb *)((skb)->cb)) 647 693 648 - typedef struct tsq_info 649 - { 650 - void *org; 651 - ns_tsi *base; 652 - ns_tsi *next; 653 - ns_tsi *last; 694 + typedef struct tsq_info { 695 + void *org; 696 + ns_tsi *base; 697 + ns_tsi *next; 698 + ns_tsi *last; 654 699 } tsq_info; 655 700 656 - 657 - typedef struct scq_info 658 - { 659 - void *org; 660 - ns_scqe *base; 661 - ns_scqe *last; 662 - ns_scqe *next; 663 - volatile ns_scqe *tail; /* Not related to the nicstar register */ 664 - unsigned num_entries; 665 - struct sk_buff **skb; /* Pointer to an array of pointers 666 - to the sk_buffs used for tx */ 667 - u32 scd; /* SRAM address of the corresponding 668 - SCD 
*/ 669 - int tbd_count; /* Only meaningful on variable rate */ 670 - wait_queue_head_t scqfull_waitq; 671 - volatile char full; /* SCQ full indicator */ 672 - spinlock_t lock; /* SCQ spinlock */ 701 + typedef struct scq_info { 702 + void *org; 703 + ns_scqe *base; 704 + ns_scqe *last; 705 + ns_scqe *next; 706 + volatile ns_scqe *tail; /* Not related to the nicstar register */ 707 + unsigned num_entries; 708 + struct sk_buff **skb; /* Pointer to an array of pointers 709 + to the sk_buffs used for tx */ 710 + u32 scd; /* SRAM address of the corresponding 711 + SCD */ 712 + int tbd_count; /* Only meaningful on variable rate */ 713 + wait_queue_head_t scqfull_waitq; 714 + volatile char full; /* SCQ full indicator */ 715 + spinlock_t lock; /* SCQ spinlock */ 673 716 } scq_info; 674 717 675 - 676 - 677 - typedef struct rsq_info 678 - { 679 - void *org; 680 - ns_rsqe *base; 681 - ns_rsqe *next; 682 - ns_rsqe *last; 718 + typedef struct rsq_info { 719 + void *org; 720 + ns_rsqe *base; 721 + ns_rsqe *next; 722 + ns_rsqe *last; 683 723 } rsq_info; 684 724 685 - 686 - typedef struct skb_pool 687 - { 688 - volatile int count; /* number of buffers in the queue */ 689 - struct sk_buff_head queue; 725 + typedef struct skb_pool { 726 + volatile int count; /* number of buffers in the queue */ 727 + struct sk_buff_head queue; 690 728 } skb_pool; 691 729 692 730 /* NOTE: for small and large buffer pools, the count is not used, as the 693 731 actual value used for buffer management is the one read from the 694 732 card. */ 695 733 696 - 697 - typedef struct vc_map 698 - { 699 - volatile unsigned int tx:1; /* TX vc? */ 700 - volatile unsigned int rx:1; /* RX vc? */ 701 - struct atm_vcc *tx_vcc, *rx_vcc; 702 - struct sk_buff *rx_iov; /* RX iovector skb */ 703 - scq_info *scq; /* To keep track of the SCQ */ 704 - u32 cbr_scd; /* SRAM address of the corresponding 705 - SCD. 
/*
 * Per-VC state.  One entry lives in ns_dev.vcmap for every possible
 * receive connection; the TX fields are used when the VC also transmits.
 */
typedef struct vc_map {
	volatile unsigned int tx:1;	/* TX vc? */
	volatile unsigned int rx:1;	/* RX vc? */
	struct atm_vcc *tx_vcc, *rx_vcc;
	struct sk_buff *rx_iov;	/* RX iovector skb */
	scq_info *scq;		/* To keep track of the SCQ */
	u32 cbr_scd;		/* SRAM address of the corresponding
				   SCD. 0x00000000 for UBR/VBR/ABR */
	int tbd_count;		/* NOTE(review): presumably counts queued
				   TBDs - confirm against the tx path */
} vc_map;

/* Driver-private per-skb data, overlaid on skb->cb via NS_SKB(). */
struct ns_skb_data {
	struct atm_vcc *vcc;
	int iovcnt;
};

#define NS_SKB(skb) (((struct ns_skb_data *) (skb)->cb))

/* Per-card device state. */
typedef struct ns_dev {
	int index;		/* Card ID to the device driver */
	int sram_size;		/* In k x 32bit words. 32 or 128 */
	void __iomem *membase;	/* Card's memory base address */
	unsigned long max_pcr;
	int rct_size;		/* Number of entries */
	int vpibits;
	int vcibits;
	struct pci_dev *pcidev;
	struct atm_dev *atmdev;
	tsq_info tsq;
	rsq_info rsq;
	scq_info *scq0, *scq1, *scq2;	/* VBR SCQs */
	skb_pool sbpool;	/* Small buffers */
	skb_pool lbpool;	/* Large buffers */
	skb_pool hbpool;	/* Pre-allocated huge buffers */
	skb_pool iovpool;	/* iovector buffers */
	volatile int efbie;	/* Empty free buf. queue int. enabled */
	volatile u32 tst_addr;	/* SRAM address of the TST in use */
	volatile int tst_free_entries;
	vc_map vcmap[NS_MAX_RCTSIZE];
	vc_map *tste2vc[NS_TST_NUM_ENTRIES];
	vc_map *scd2vc[NS_FRSCD_NUM];
	buf_nr sbnr;
	buf_nr lbnr;
	buf_nr hbnr;
	buf_nr iovnr;
	int sbfqc;
	int lbfqc;
	u32 sm_handle;
	u32 sm_addr;
	u32 lg_handle;
	u32 lg_addr;
	struct sk_buff *rcbuf;	/* Current raw cell buffer */
	u32 rawch;		/* Raw cell queue head */
	unsigned intcnt;	/* Interrupt counter */
	spinlock_t int_lock;	/* Interrupt lock */
	spinlock_t res_lock;	/* Card resource lock */
} ns_dev;

/* NOTE: Each tste2vc entry relates a given TST entry to the corresponding
   CBR vc. If the entry is not allocated, it must be NULL.

   There are two TSTs so the driver can modify them on the fly
   without stopping the transmission.

   scd2vc allows us to find out unused fixed rate SCDs, because
   they must have a NULL pointer here. */

#endif /* _LINUX_NICSTAR_H_ */
+160 -188
drivers/atm/nicstarmac.c
#define CYCLE_DELAY 5

/*
   This was the original definition
#define osp_MicroDelay(microsec) \
    do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
 */
/*
 * Pace the bit-banged EEPROM interface.  Wrapped in do { } while (0)
 * so the macro expands to exactly one statement and stays safe as the
 * sole body of an if/else (a bare brace block followed by ';' would
 * break an if/else chain).
 */
#define osp_MicroDelay(microsec)				\
	do {							\
		unsigned long useconds = (microsec);		\
		udelay((useconds));				\
	} while (0)

/*
 * The following tables represent the timing diagrams found in
 * the Data Sheet for the Xicor X25020 EEProm. The #defines below
 * represent the bits in the NICStAR's General Purpose register
 * that must be toggled for the corresponding actions on the EEProm
 * to occur.
 */

/* Write Data To EEProm from SI line on rising edge of CLK */
/* Read Data From EEProm on falling edge of CLK */

#define CS_HIGH		0x0002	/* Chip select high */
#define CS_LOW		0x0000	/* Chip select low (active low) */
#define CLK_HIGH	0x0004	/* Clock high */
#define CLK_LOW		0x0000	/* Clock low */
#define SI_HIGH		0x0001	/* Serial input data high */
#define SI_LOW		0x0000	/* Serial input data low */

/* Read Status Register = 0000 0101b */
#if 0
static u_int32_t rdsrtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};
#endif /* 0 */

/* Read from EEPROM = 0000 0011b */
static u_int32_t readtab[] = {
	/*
	   CS_HIGH | CLK_HIGH,
	 */
	CS_LOW | CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};

/* Clock to read from/write to the eeprom */
static u_int32_t clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
/*
 * Read the X25020 status register by bit-banging the RDSR instruction
 * through the NICStAR General Purpose register.  Dead code (#if 0),
 * kept for reference together with rdsrtab above.
 */
#if 0
u_int32_t nicstar_read_eprom_status(virt_addr_t base)
{
	u_int32_t val;
	u_int32_t rbyte;
	int32_t i, j;

	/* Send read instruction */
	val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;

	for (i = 0; i < ARRAY_SIZE(rdsrtab); i++) {
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | rdsrtab[i]));
		osp_MicroDelay(CYCLE_DELAY);
	}

	/* Done sending instruction - now pull data off of bit 16, MSB first */
	/* Data clocked out of eeprom on falling edge of clock */

	rbyte = 0;
	for (i = 7, j = 0; i >= 0; i--) {
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++]));
		rbyte |= (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE)
			    & 0x00010000) >> 16) << i);
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++]));
		osp_MicroDelay(CYCLE_DELAY);
	}
	/* Deselect the EEPROM (CS high, clock/SI low). */
	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2);
	osp_MicroDelay(CYCLE_DELAY);
	return rbyte;
}
#endif /* 0 */

/*
 * This routine will clock the Read_data function into the X2520
 * eeprom, followed by the address to read from, through the NicSTaR's General
 * Purpose register.
 */
/*
 * Returns the byte stored at @offset in the serial EEPROM.  @base is the
 * card's mapped register base (virt_addr_t).  Data is sampled from bit 16
 * of the General Purpose register, MSB first, on the falling clock edge.
 */
static u_int8_t read_eprom_byte(virt_addr_t base, u_int8_t offset)
{
	u_int32_t val = 0;
	int i, j = 0;
	u_int8_t tempread = 0;

	/* Preserve the upper GP-register bits; only the low nibble drives
	   the EEPROM lines. */
	val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | readtab[i]));
		osp_MicroDelay(CYCLE_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++] | ((offset >> i) & 1)));
		osp_MicroDelay(CYCLE_DELAY);
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++] | ((offset >> i) & 1)));
		osp_MicroDelay(CYCLE_DELAY);
	}

	j = 0;

	/* Now, we can read data from the eeprom by clocking it in */
	for (i = 7; i >= 0; i--) {
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++]));
		osp_MicroDelay(CYCLE_DELAY);
		tempread |=
		    (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE)
		       & 0x00010000) >> 16) << i);
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++]));
		osp_MicroDelay(CYCLE_DELAY);
	}

	/* Deselect the EEPROM (CS high, clock/SI low). */
	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2);
	osp_MicroDelay(CYCLE_DELAY);
	return tempread;
}

/*
 * Put the EEPROM interface into a known idle state: chip select off,
 * with two full clock cycles while CS is held high.
 */
static void nicstar_init_eprom(virt_addr_t base)
{
	u_int32_t val;

	/*
	 * turn chip select off
	 */
	val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;

	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
			  (val | CS_HIGH | CLK_HIGH));
	osp_MicroDelay(CYCLE_DELAY);

	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
			  (val | CS_HIGH | CLK_LOW));
	osp_MicroDelay(CYCLE_DELAY);

	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
			  (val | CS_HIGH | CLK_HIGH));
	osp_MicroDelay(CYCLE_DELAY);

	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
			  (val | CS_HIGH | CLK_LOW));
	osp_MicroDelay(CYCLE_DELAY);
}

/*
 * This routine will be the interface to the ReadPromByte function
 * above.
 */
/*
 * Read @nbytes consecutive bytes starting at @prom_offset into @buffer.
 * Each byte is fetched with a full READ transaction via read_eprom_byte().
 */
static void
nicstar_read_eprom(virt_addr_t base,
		   u_int8_t prom_offset, u_int8_t * buffer, u_int32_t nbytes)
{
	u_int i;

	for (i = 0; i < nbytes; i++) {
		buffer[i] = read_eprom_byte(base, prom_offset);
		++prom_offset;
		osp_MicroDelay(CYCLE_DELAY);
	}
}