Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: ixp4xx_hss: Convert to use DT probing

IXP4xx is being migrated to device tree only. Convert this
driver to use device tree probing.

Pull in all the boardfile code from the one boardfile and
make it local, pull all the boardfile parameters from the
device tree instead of the board file.

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Linus Walleij, committed by David S. Miller
35aefaad 9c37b09d

+185 -75
drivers/net/wan/ixp4xx_hss.c
··· 17 17 #include <linux/io.h> 18 18 #include <linux/kernel.h> 19 19 #include <linux/platform_device.h> 20 - #include <linux/platform_data/wan_ixp4xx_hss.h> 21 20 #include <linux/poll.h> 22 21 #include <linux/slab.h> 22 + #include <linux/gpio/consumer.h> 23 + #include <linux/of.h> 23 24 #include <linux/soc/ixp4xx/npe.h> 24 25 #include <linux/soc/ixp4xx/qmgr.h> 25 26 #include <linux/soc/ixp4xx/cpu.h> 27 + 28 + /* This is what all IXP4xx platforms we know uses, if more frequencies 29 + * are needed, we need to migrate to the clock framework. 30 + */ 31 + #define IXP4XX_TIMER_FREQ 66666000 26 32 27 33 #define DEBUG_DESC 0 28 34 #define DEBUG_RX 0 ··· 56 50 #define NAPI_WEIGHT 16 57 51 58 52 /* Queue IDs */ 59 - #define HSS0_CHL_RXTRIG_QUEUE 12 /* orig size = 32 dwords */ 60 53 #define HSS0_PKT_RX_QUEUE 13 /* orig size = 32 dwords */ 61 54 #define HSS0_PKT_TX0_QUEUE 14 /* orig size = 16 dwords */ 62 55 #define HSS0_PKT_TX1_QUEUE 15 ··· 67 62 #define HSS0_PKT_RXFREE3_QUEUE 21 68 63 #define HSS0_PKT_TXDONE_QUEUE 22 /* orig size = 64 dwords */ 69 64 70 - #define HSS1_CHL_RXTRIG_QUEUE 10 71 65 #define HSS1_PKT_RX_QUEUE 0 72 66 #define HSS1_PKT_TX0_QUEUE 5 73 67 #define HSS1_PKT_TX1_QUEUE 6 ··· 256 252 struct port { 257 253 struct device *dev; 258 254 struct npe *npe; 255 + unsigned int txreadyq; 256 + unsigned int rxtrigq; 257 + unsigned int rxfreeq; 258 + unsigned int rxq; 259 + unsigned int txq; 260 + unsigned int txdoneq; 261 + struct gpio_desc *cts; 262 + struct gpio_desc *rts; 263 + struct gpio_desc *dcd; 264 + struct gpio_desc *dtr; 265 + struct gpio_desc *clk_internal; 259 266 struct net_device *netdev; 260 267 struct napi_struct napi; 261 - struct hss_plat_info *plat; 262 268 buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; 263 269 struct desc *desc_tab; /* coherent */ 264 270 dma_addr_t desc_tab_phys; ··· 335 321 static int ports_open; 336 322 static struct dma_pool *dma_pool; 337 323 static DEFINE_SPINLOCK(npe_lock); 338 - 339 - static const struct { 340 
- int tx, txdone, rx, rxfree; 341 - } queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE, 342 - HSS0_PKT_RXFREE0_QUEUE}, 343 - {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE, 344 - HSS1_PKT_RXFREE0_QUEUE}, 345 - }; 346 324 347 325 /***************************************************************************** 348 326 * utility functions ··· 651 645 #if DEBUG_RX 652 646 printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name); 653 647 #endif 654 - qmgr_disable_irq(queue_ids[port->id].rx); 648 + qmgr_disable_irq(port->rxq); 655 649 napi_schedule(&port->napi); 656 650 } 657 651 ··· 659 653 { 660 654 struct port *port = container_of(napi, struct port, napi); 661 655 struct net_device *dev = port->netdev; 662 - unsigned int rxq = queue_ids[port->id].rx; 663 - unsigned int rxfreeq = queue_ids[port->id].rxfree; 656 + unsigned int rxq = port->rxq; 657 + unsigned int rxfreeq = port->rxfreeq; 664 658 int received = 0; 665 659 666 660 #if DEBUG_RX ··· 801 795 #if DEBUG_TX 802 796 printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n"); 803 797 #endif 804 - while ((n_desc = queue_get_desc(queue_ids[port->id].txdone, 798 + while ((n_desc = queue_get_desc(port->txdoneq, 805 799 port, 1)) >= 0) { 806 800 struct desc *desc; 807 801 int start; ··· 819 813 free_buffer_irq(port->tx_buff_tab[n_desc]); 820 814 port->tx_buff_tab[n_desc] = NULL; 821 815 822 - start = qmgr_stat_below_low_watermark(port->plat->txreadyq); 823 - queue_put_desc(port->plat->txreadyq, 816 + start = qmgr_stat_below_low_watermark(port->txreadyq); 817 + queue_put_desc(port->txreadyq, 824 818 tx_desc_phys(port, n_desc), desc); 825 819 if (start) { /* TX-ready queue was empty */ 826 820 #if DEBUG_TX ··· 835 829 static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev) 836 830 { 837 831 struct port *port = dev_to_port(dev); 838 - unsigned int txreadyq = port->plat->txreadyq; 832 + unsigned int txreadyq = port->txreadyq; 839 833 int len, offset, bytes, n; 840 834 void 
*mem; 841 835 u32 phys; ··· 895 889 desc->buf_len = desc->pkt_len = len; 896 890 897 891 wmb(); 898 - queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc); 892 + queue_put_desc(port->txq, tx_desc_phys(port, n), desc); 899 893 900 894 if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */ 901 895 #if DEBUG_TX ··· 922 916 { 923 917 int err; 924 918 925 - err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0, 919 + err = qmgr_request_queue(port->rxfreeq, RX_DESCS, 0, 0, 926 920 "%s:RX-free", port->netdev->name); 927 921 if (err) 928 922 return err; 929 923 930 - err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0, 924 + err = qmgr_request_queue(port->rxq, RX_DESCS, 0, 0, 931 925 "%s:RX", port->netdev->name); 932 926 if (err) 933 927 goto rel_rxfree; 934 928 935 - err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0, 929 + err = qmgr_request_queue(port->txq, TX_DESCS, 0, 0, 936 930 "%s:TX", port->netdev->name); 937 931 if (err) 938 932 goto rel_rx; 939 933 940 - err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0, 934 + err = qmgr_request_queue(port->txreadyq, TX_DESCS, 0, 0, 941 935 "%s:TX-ready", port->netdev->name); 942 936 if (err) 943 937 goto rel_tx; 944 938 945 - err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0, 939 + err = qmgr_request_queue(port->txdoneq, TX_DESCS, 0, 0, 946 940 "%s:TX-done", port->netdev->name); 947 941 if (err) 948 942 goto rel_txready; 949 943 return 0; 950 944 951 945 rel_txready: 952 - qmgr_release_queue(port->plat->txreadyq); 946 + qmgr_release_queue(port->txreadyq); 953 947 rel_tx: 954 - qmgr_release_queue(queue_ids[port->id].tx); 948 + qmgr_release_queue(port->txq); 955 949 rel_rx: 956 - qmgr_release_queue(queue_ids[port->id].rx); 950 + qmgr_release_queue(port->rxq); 957 951 rel_rxfree: 958 - qmgr_release_queue(queue_ids[port->id].rxfree); 952 + qmgr_release_queue(port->rxfreeq); 959 953 printk(KERN_DEBUG "%s: unable to request hardware queues\n", 960 
954 port->netdev->name); 961 955 return err; ··· 963 957 964 958 static void release_hdlc_queues(struct port *port) 965 959 { 966 - qmgr_release_queue(queue_ids[port->id].rxfree); 967 - qmgr_release_queue(queue_ids[port->id].rx); 968 - qmgr_release_queue(queue_ids[port->id].txdone); 969 - qmgr_release_queue(queue_ids[port->id].tx); 970 - qmgr_release_queue(port->plat->txreadyq); 960 + qmgr_release_queue(port->rxfreeq); 961 + qmgr_release_queue(port->rxq); 962 + qmgr_release_queue(port->txdoneq); 963 + qmgr_release_queue(port->txq); 964 + qmgr_release_queue(port->txreadyq); 971 965 } 972 966 973 967 static int init_hdlc_queues(struct port *port) ··· 1052 1046 } 1053 1047 } 1054 1048 1049 + static irqreturn_t hss_hdlc_dcd_irq(int irq, void *data) 1050 + { 1051 + struct net_device *dev = data; 1052 + struct port *port = dev_to_port(dev); 1053 + int val; 1054 + 1055 + val = gpiod_get_value(port->dcd); 1056 + hss_hdlc_set_carrier(dev, val); 1057 + 1058 + return IRQ_HANDLED; 1059 + } 1060 + 1055 1061 static int hss_hdlc_open(struct net_device *dev) 1056 1062 { 1057 1063 struct port *port = dev_to_port(dev); 1058 1064 unsigned long flags; 1059 1065 int i, err = 0; 1066 + int val; 1060 1067 1061 1068 err = hdlc_open(dev); 1062 1069 if (err) ··· 1088 1069 goto err_destroy_queues; 1089 1070 1090 1071 spin_lock_irqsave(&npe_lock, flags); 1091 - if (port->plat->open) { 1092 - err = port->plat->open(port->id, dev, hss_hdlc_set_carrier); 1093 - if (err) 1094 - goto err_unlock; 1072 + 1073 + /* Set the carrier, the GPIO is flagged active low so this will return 1074 + * 1 if DCD is asserted. 
1075 + */ 1076 + val = gpiod_get_value(port->dcd); 1077 + hss_hdlc_set_carrier(dev, val); 1078 + 1079 + /* Set up an IRQ for DCD */ 1080 + err = request_irq(gpiod_to_irq(port->dcd), hss_hdlc_dcd_irq, 0, "IXP4xx HSS", dev); 1081 + if (err) { 1082 + dev_err(&dev->dev, "ixp4xx_hss: failed to request DCD IRQ (%i)\n", err); 1083 + goto err_unlock; 1095 1084 } 1085 + 1086 + /* GPIOs are flagged active low so this asserts DTR and RTS */ 1087 + gpiod_set_value(port->dtr, 1); 1088 + gpiod_set_value(port->rts, 1); 1096 1089 1097 1090 spin_unlock_irqrestore(&npe_lock, flags); 1098 1091 1099 1092 /* Populate queues with buffers, no failure after this point */ 1100 1093 for (i = 0; i < TX_DESCS; i++) 1101 - queue_put_desc(port->plat->txreadyq, 1094 + queue_put_desc(port->txreadyq, 1102 1095 tx_desc_phys(port, i), tx_desc_ptr(port, i)); 1103 1096 1104 1097 for (i = 0; i < RX_DESCS; i++) 1105 - queue_put_desc(queue_ids[port->id].rxfree, 1098 + queue_put_desc(port->rxfreeq, 1106 1099 rx_desc_phys(port, i), rx_desc_ptr(port, i)); 1107 1100 1108 1101 napi_enable(&port->napi); 1109 1102 netif_start_queue(dev); 1110 1103 1111 - qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY, 1104 + qmgr_set_irq(port->rxq, QUEUE_IRQ_SRC_NOT_EMPTY, 1112 1105 hss_hdlc_rx_irq, dev); 1113 1106 1114 - qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY, 1107 + qmgr_set_irq(port->txdoneq, QUEUE_IRQ_SRC_NOT_EMPTY, 1115 1108 hss_hdlc_txdone_irq, dev); 1116 - qmgr_enable_irq(queue_ids[port->id].txdone); 1109 + qmgr_enable_irq(port->txdoneq); 1117 1110 1118 1111 ports_open++; 1119 1112 ··· 1156 1125 1157 1126 spin_lock_irqsave(&npe_lock, flags); 1158 1127 ports_open--; 1159 - qmgr_disable_irq(queue_ids[port->id].rx); 1128 + qmgr_disable_irq(port->rxq); 1160 1129 netif_stop_queue(dev); 1161 1130 napi_disable(&port->napi); 1162 1131 1163 1132 hss_stop_hdlc(port); 1164 1133 1165 - while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0) 1134 + while (queue_get_desc(port->rxfreeq, 
port, 0) >= 0) 1166 1135 buffs--; 1167 - while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0) 1136 + while (queue_get_desc(port->rxq, port, 0) >= 0) 1168 1137 buffs--; 1169 1138 1170 1139 if (buffs) ··· 1172 1141 buffs); 1173 1142 1174 1143 buffs = TX_DESCS; 1175 - while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0) 1144 + while (queue_get_desc(port->txq, port, 1) >= 0) 1176 1145 buffs--; /* cancel TX */ 1177 1146 1178 1147 i = 0; 1179 1148 do { 1180 - while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0) 1149 + while (queue_get_desc(port->txreadyq, port, 1) >= 0) 1181 1150 buffs--; 1182 1151 if (!buffs) 1183 1152 break; ··· 1190 1159 if (!buffs) 1191 1160 printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i); 1192 1161 #endif 1193 - qmgr_disable_irq(queue_ids[port->id].txdone); 1162 + qmgr_disable_irq(port->txdoneq); 1194 1163 1195 - if (port->plat->close) 1196 - port->plat->close(port->id, dev); 1164 + free_irq(gpiod_to_irq(port->dcd), dev); 1165 + /* GPIOs are flagged active low so this de-asserts DTR and RTS */ 1166 + gpiod_set_value(port->dtr, 0); 1167 + gpiod_set_value(port->rts, 0); 1197 1168 spin_unlock_irqrestore(&npe_lock, flags); 1198 1169 1199 1170 destroy_hdlc_queues(port); ··· 1287 1254 } 1288 1255 } 1289 1256 1257 + static int hss_hdlc_set_clock(struct port *port, unsigned int clock_type) 1258 + { 1259 + switch (clock_type) { 1260 + case CLOCK_DEFAULT: 1261 + case CLOCK_EXT: 1262 + gpiod_set_value(port->clk_internal, 0); 1263 + return CLOCK_EXT; 1264 + case CLOCK_INT: 1265 + gpiod_set_value(port->clk_internal, 1); 1266 + return CLOCK_INT; 1267 + default: 1268 + return -EINVAL; 1269 + } 1270 + } 1271 + 1290 1272 static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs) 1291 1273 { 1292 1274 const size_t size = sizeof(sync_serial_settings); ··· 1334 1286 return -EFAULT; 1335 1287 1336 1288 clk = new_line.clock_type; 1337 - if (port->plat->set_clock) 1338 - clk = port->plat->set_clock(port->id, clk); 1289 + 
hss_hdlc_set_clock(port, clk); 1339 1290 1340 1291 if (clk != CLOCK_EXT && clk != CLOCK_INT) 1341 1292 return -EINVAL; /* No such clock setting */ ··· 1344 1297 1345 1298 port->clock_type = clk; /* Update settings */ 1346 1299 if (clk == CLOCK_INT) { 1347 - find_best_clock(port->plat->timer_freq, 1300 + find_best_clock(IXP4XX_TIMER_FREQ, 1348 1301 new_line.clock_rate, 1349 1302 &port->clock_rate, &port->clock_reg); 1350 1303 } else { ··· 1382 1335 .ndo_siocwandev = hss_hdlc_ioctl, 1383 1336 }; 1384 1337 1385 - static int hss_init_one(struct platform_device *pdev) 1338 + static int ixp4xx_hss_probe(struct platform_device *pdev) 1386 1339 { 1340 + struct of_phandle_args queue_spec; 1341 + struct of_phandle_args npe_spec; 1342 + struct device *dev = &pdev->dev; 1343 + struct net_device *ndev; 1344 + struct device_node *np; 1387 1345 struct port *port; 1388 - struct net_device *dev; 1389 1346 hdlc_device *hdlc; 1390 1347 int err; 1391 1348 1392 - port = kzalloc(sizeof(*port), GFP_KERNEL); 1349 + np = dev->of_node; 1350 + 1351 + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); 1393 1352 if (!port) 1394 1353 return -ENOMEM; 1395 1354 1396 - port->npe = npe_request(0); 1355 + err = of_parse_phandle_with_fixed_args(np, "intel,npe-handle", 1, 0, 1356 + &npe_spec); 1357 + if (err) 1358 + return dev_err_probe(dev, err, "no NPE engine specified\n"); 1359 + /* NPE ID 0x00, 0x10, 0x20... 
*/ 1360 + port->npe = npe_request(npe_spec.args[0] << 4); 1397 1361 if (!port->npe) { 1398 - err = -ENODEV; 1399 - goto err_free; 1362 + dev_err(dev, "unable to obtain NPE instance\n"); 1363 + return -ENODEV; 1400 1364 } 1401 1365 1402 - dev = alloc_hdlcdev(port); 1366 + /* Get the TX ready queue as resource from queue manager */ 1367 + err = of_parse_phandle_with_fixed_args(np, "intek,queue-chl-txready", 1, 0, 1368 + &queue_spec); 1369 + if (err) 1370 + return dev_err_probe(dev, err, "no txready queue phandle\n"); 1371 + port->txreadyq = queue_spec.args[0]; 1372 + /* Get the RX trig queue as resource from queue manager */ 1373 + err = of_parse_phandle_with_fixed_args(np, "intek,queue-chl-rxtrig", 1, 0, 1374 + &queue_spec); 1375 + if (err) 1376 + return dev_err_probe(dev, err, "no rxtrig queue phandle\n"); 1377 + port->rxtrigq = queue_spec.args[0]; 1378 + /* Get the RX queue as resource from queue manager */ 1379 + err = of_parse_phandle_with_fixed_args(np, "intek,queue-pkt-rx", 1, 0, 1380 + &queue_spec); 1381 + if (err) 1382 + return dev_err_probe(dev, err, "no RX queue phandle\n"); 1383 + port->rxq = queue_spec.args[0]; 1384 + /* Get the TX queue as resource from queue manager */ 1385 + err = of_parse_phandle_with_fixed_args(np, "intek,queue-pkt-tx", 1, 0, 1386 + &queue_spec); 1387 + if (err) 1388 + return dev_err_probe(dev, err, "no RX queue phandle\n"); 1389 + port->txq = queue_spec.args[0]; 1390 + /* Get the RX free queue as resource from queue manager */ 1391 + err = of_parse_phandle_with_fixed_args(np, "intek,queue-pkt-rxfree", 1, 0, 1392 + &queue_spec); 1393 + if (err) 1394 + return dev_err_probe(dev, err, "no RX free queue phandle\n"); 1395 + port->rxfreeq = queue_spec.args[0]; 1396 + /* Get the TX done queue as resource from queue manager */ 1397 + err = of_parse_phandle_with_fixed_args(np, "intek,queue-pkt-txdone", 1, 0, 1398 + &queue_spec); 1399 + if (err) 1400 + return dev_err_probe(dev, err, "no TX done queue phandle\n"); 1401 + port->txdoneq = 
queue_spec.args[0]; 1402 + 1403 + /* Obtain all the line control GPIOs */ 1404 + port->cts = devm_gpiod_get(dev, "cts", GPIOD_OUT_LOW); 1405 + if (IS_ERR(port->cts)) 1406 + return dev_err_probe(dev, PTR_ERR(port->cts), "unable to get CTS GPIO\n"); 1407 + port->rts = devm_gpiod_get(dev, "rts", GPIOD_OUT_LOW); 1408 + if (IS_ERR(port->rts)) 1409 + return dev_err_probe(dev, PTR_ERR(port->rts), "unable to get RTS GPIO\n"); 1410 + port->dcd = devm_gpiod_get(dev, "dcd", GPIOD_IN); 1411 + if (IS_ERR(port->dcd)) 1412 + return dev_err_probe(dev, PTR_ERR(port->dcd), "unable to get DCD GPIO\n"); 1413 + port->dtr = devm_gpiod_get(dev, "dtr", GPIOD_OUT_LOW); 1414 + if (IS_ERR(port->dtr)) 1415 + return dev_err_probe(dev, PTR_ERR(port->dtr), "unable to get DTR GPIO\n"); 1416 + port->clk_internal = devm_gpiod_get(dev, "clk-internal", GPIOD_OUT_LOW); 1417 + if (IS_ERR(port->clk_internal)) 1418 + return dev_err_probe(dev, PTR_ERR(port->clk_internal), 1419 + "unable to get CLK internal GPIO\n"); 1420 + 1421 + ndev = alloc_hdlcdev(port); 1403 1422 port->netdev = alloc_hdlcdev(port); 1404 1423 if (!port->netdev) { 1405 1424 err = -ENOMEM; 1406 1425 goto err_plat; 1407 1426 } 1408 1427 1409 - SET_NETDEV_DEV(dev, &pdev->dev); 1410 - hdlc = dev_to_hdlc(dev); 1428 + SET_NETDEV_DEV(ndev, &pdev->dev); 1429 + hdlc = dev_to_hdlc(ndev); 1411 1430 hdlc->attach = hss_hdlc_attach; 1412 1431 hdlc->xmit = hss_hdlc_xmit; 1413 - dev->netdev_ops = &hss_hdlc_ops; 1414 - dev->tx_queue_len = 100; 1432 + ndev->netdev_ops = &hss_hdlc_ops; 1433 + ndev->tx_queue_len = 100; 1415 1434 port->clock_type = CLOCK_EXT; 1416 1435 port->clock_rate = 0; 1417 1436 port->clock_reg = CLK42X_SPEED_2048KHZ; 1418 1437 port->id = pdev->id; 1419 1438 port->dev = &pdev->dev; 1420 - port->plat = pdev->dev.platform_data; 1421 - netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT); 1439 + netif_napi_add(ndev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT); 1422 1440 1423 - err = register_hdlc_device(dev); 1441 + err = 
register_hdlc_device(ndev); 1424 1442 if (err) 1425 1443 goto err_free_netdev; 1426 1444 1427 1445 platform_set_drvdata(pdev, port); 1428 1446 1429 - netdev_info(dev, "initialized\n"); 1447 + netdev_info(ndev, "initialized\n"); 1430 1448 return 0; 1431 1449 1432 1450 err_free_netdev: 1433 - free_netdev(dev); 1451 + free_netdev(ndev); 1434 1452 err_plat: 1435 1453 npe_release(port->npe); 1436 - err_free: 1437 - kfree(port); 1438 1454 return err; 1439 1455 } 1440 1456 1441 - static int hss_remove_one(struct platform_device *pdev) 1457 + static int ixp4xx_hss_remove(struct platform_device *pdev) 1442 1458 { 1443 1459 struct port *port = platform_get_drvdata(pdev); 1444 1460 ··· 1514 1404 1515 1405 static struct platform_driver ixp4xx_hss_driver = { 1516 1406 .driver.name = DRV_NAME, 1517 - .probe = hss_init_one, 1518 - .remove = hss_remove_one, 1407 + .probe = ixp4xx_hss_probe, 1408 + .remove = ixp4xx_hss_remove, 1519 1409 }; 1520 1410 1521 1411 static int __init hss_init_module(void)