Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: ti: icssg-prueth: Add AF_XDP zero copy for RX

Use xsk_pool inside rx_chn to check if a given Rx queue id
is registered for xsk zero copy, which gets populated during
xsk enable.

Update prueth_create_xdp_rxqs to register and support two different
memory models (xsk and page) for a given Rx queue, if registered for
zero copy.

If xsk_pool is registered, allocate buffers from UMEM and map them
to the hardware Rx descriptors. In NAPI context, run the XDP program
for each packet and process the xsk buffer according to the XDP
result codes. Also allocate a new set of buffers from UMEM for the
next batch of NAPI Rx processing. Add XDP_WAKEUP_RX support to enable
xsk wakeup for Rx.

Move prueth_create_page_pool to prueth_init_rx_chns to avoid freeing
and re-allocating the system memory every time there is a transition
from zero copy to copy, and to prevent any kind of memory fragmentation
or leak.

Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Meghana Malladi <m-malladi@ti.com>
Link: https://patch.msgid.link/20251118135542.380574-6-m-malladi@ti.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

authored by

Meghana Malladi and committed by
Paolo Abeni
7a64bb38 12113316

+312 -71
+264 -57
drivers/net/ethernet/ti/icssg/icssg_common.c
··· 465 465 } 466 466 EXPORT_SYMBOL_GPL(prueth_init_tx_chns); 467 467 468 + static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac, 469 + struct device *dma_dev, 470 + int size) 471 + { 472 + struct page_pool_params pp_params = { 0 }; 473 + struct page_pool *pool; 474 + 475 + pp_params.order = 0; 476 + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 477 + pp_params.pool_size = size; 478 + pp_params.nid = dev_to_node(emac->prueth->dev); 479 + pp_params.dma_dir = DMA_BIDIRECTIONAL; 480 + pp_params.dev = dma_dev; 481 + pp_params.napi = &emac->napi_rx; 482 + pp_params.max_len = PAGE_SIZE; 483 + 484 + pool = page_pool_create(&pp_params); 485 + if (IS_ERR(pool)) 486 + netdev_err(emac->ndev, "cannot create rx page pool\n"); 487 + 488 + return pool; 489 + } 490 + 468 491 int prueth_init_rx_chns(struct prueth_emac *emac, 469 492 struct prueth_rx_chn *rx_chn, 470 493 char *name, u32 max_rflows, ··· 497 474 struct device *dev = emac->prueth->dev; 498 475 struct net_device *ndev = emac->ndev; 499 476 u32 fdqring_id, hdesc_size; 477 + struct page_pool *pool; 500 478 int i, ret = 0, slice; 501 479 int flow_id_base; 502 480 ··· 539 515 netdev_err(ndev, "Failed to create rx pool: %d\n", ret); 540 516 goto fail; 541 517 } 518 + 519 + pool = prueth_create_page_pool(emac, rx_chn->dma_dev, rx_chn->descs_num); 520 + if (IS_ERR(pool)) { 521 + ret = PTR_ERR(pool); 522 + goto fail; 523 + } 524 + 525 + rx_chn->pg_pool = pool; 542 526 543 527 flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn); 544 528 if (emac->is_sr1 && !strcmp(name, "rxmgm")) { ··· 829 797 fallthrough; /* handle aborts by dropping packet */ 830 798 case XDP_DROP: 831 799 ndev->stats.rx_dropped++; 832 - page_pool_recycle_direct(emac->rx_chns.pg_pool, 833 - virt_to_head_page(xdp->data)); 834 800 return ICSSG_XDP_CONSUMED; 835 801 } 802 + } 803 + 804 + static int prueth_dma_rx_push_mapped_zc(struct prueth_emac *emac, 805 + struct prueth_rx_chn *rx_chn, 806 + struct xdp_buff *xdp) 807 
+ { 808 + struct net_device *ndev = emac->ndev; 809 + struct cppi5_host_desc_t *desc_rx; 810 + struct prueth_swdata *swdata; 811 + dma_addr_t desc_dma; 812 + dma_addr_t buf_dma; 813 + int buf_len; 814 + 815 + buf_dma = xsk_buff_xdp_get_dma(xdp); 816 + desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool); 817 + if (!desc_rx) { 818 + netdev_err(ndev, "rx push: failed to allocate descriptor\n"); 819 + return -ENOMEM; 820 + } 821 + desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx); 822 + 823 + cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT, 824 + PRUETH_NAV_PS_DATA_SIZE); 825 + k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma); 826 + buf_len = xsk_pool_get_rx_frame_size(rx_chn->xsk_pool); 827 + cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len); 828 + swdata = cppi5_hdesc_get_swdata(desc_rx); 829 + swdata->type = PRUETH_SWDATA_XSK; 830 + swdata->data.xdp = xdp; 831 + 832 + return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA, 833 + desc_rx, desc_dma); 834 + } 835 + 836 + static int prueth_rx_alloc_zc(struct prueth_emac *emac, int budget) 837 + { 838 + struct prueth_rx_chn *rx_chn = &emac->rx_chns; 839 + struct xdp_buff *xdp; 840 + int i, ret; 841 + 842 + for (i = 0; i < budget; i++) { 843 + xdp = xsk_buff_alloc(rx_chn->xsk_pool); 844 + if (!xdp) 845 + break; 846 + 847 + ret = prueth_dma_rx_push_mapped_zc(emac, rx_chn, xdp); 848 + if (ret) { 849 + netdev_err(emac->ndev, "rx alloc: failed to map descriptors to xdp buff\n"); 850 + xsk_buff_free(xdp); 851 + break; 852 + } 853 + } 854 + 855 + return i; 856 + } 857 + 858 + static void emac_dispatch_skb_zc(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *psdata) 859 + { 860 + unsigned int headroom = xdp->data - xdp->data_hard_start; 861 + unsigned int pkt_len = xdp->data_end - xdp->data; 862 + struct net_device *ndev = emac->ndev; 863 + struct sk_buff *skb; 864 + 865 + skb = napi_alloc_skb(&emac->napi_rx, xdp->data_end - xdp->data_hard_start); 866 + if 
(unlikely(!skb)) { 867 + ndev->stats.rx_dropped++; 868 + return; 869 + } 870 + 871 + skb_reserve(skb, headroom); 872 + skb_put(skb, pkt_len); 873 + skb->dev = ndev; 874 + 875 + /* RX HW timestamp */ 876 + if (emac->rx_ts_enabled) 877 + emac_rx_timestamp(emac, skb, psdata); 878 + 879 + if (emac->prueth->is_switch_mode) 880 + skb->offload_fwd_mark = emac->offload_fwd_mark; 881 + skb->protocol = eth_type_trans(skb, ndev); 882 + 883 + skb_mark_for_recycle(skb); 884 + napi_gro_receive(&emac->napi_rx, skb); 885 + ndev->stats.rx_bytes += pkt_len; 886 + ndev->stats.rx_packets++; 887 + } 888 + 889 + static int emac_rx_packet_zc(struct prueth_emac *emac, u32 flow_id, 890 + int budget) 891 + { 892 + struct prueth_rx_chn *rx_chn = &emac->rx_chns; 893 + u32 buf_dma_len, pkt_len, port_id = 0; 894 + struct net_device *ndev = emac->ndev; 895 + struct cppi5_host_desc_t *desc_rx; 896 + struct prueth_swdata *swdata; 897 + dma_addr_t desc_dma, buf_dma; 898 + struct xdp_buff *xdp; 899 + int xdp_status = 0; 900 + int count = 0; 901 + u32 *psdata; 902 + int ret; 903 + 904 + while (count < budget) { 905 + ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma); 906 + if (ret) { 907 + if (ret != -ENODATA) 908 + netdev_err(ndev, "rx pop: failed: %d\n", ret); 909 + break; 910 + } 911 + 912 + if (cppi5_desc_is_tdcm(desc_dma)) { 913 + complete(&emac->tdown_complete); 914 + break; 915 + } 916 + 917 + desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); 918 + swdata = cppi5_hdesc_get_swdata(desc_rx); 919 + if (swdata->type != PRUETH_SWDATA_XSK) { 920 + netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type); 921 + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); 922 + break; 923 + } 924 + 925 + xdp = swdata->data.xdp; 926 + cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); 927 + k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); 928 + pkt_len = cppi5_hdesc_get_pktlen(desc_rx); 929 + /* firmware adds 4 CRC bytes, strip them */ 930 + pkt_len -= 
4; 931 + cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); 932 + psdata = cppi5_hdesc_get_psdata(desc_rx); 933 + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); 934 + count++; 935 + xsk_buff_set_size(xdp, pkt_len); 936 + xsk_buff_dma_sync_for_cpu(xdp); 937 + 938 + if (prueth_xdp_is_enabled(emac)) { 939 + ret = emac_run_xdp(emac, xdp, &pkt_len); 940 + switch (ret) { 941 + case ICSSG_XDP_PASS: 942 + /* prepare skb and send to n/w stack */ 943 + emac_dispatch_skb_zc(emac, xdp, psdata); 944 + xsk_buff_free(xdp); 945 + break; 946 + case ICSSG_XDP_CONSUMED: 947 + xsk_buff_free(xdp); 948 + break; 949 + case ICSSG_XDP_TX: 950 + case ICSSG_XDP_REDIR: 951 + xdp_status |= ret; 952 + break; 953 + } 954 + } else { 955 + /* prepare skb and send to n/w stack */ 956 + emac_dispatch_skb_zc(emac, xdp, psdata); 957 + xsk_buff_free(xdp); 958 + } 959 + } 960 + 961 + if (xdp_status & ICSSG_XDP_REDIR) 962 + xdp_do_flush(); 963 + 964 + /* Allocate xsk buffers from the pool for the "count" number of 965 + * packets processed in order to be able to receive more packets. 
966 + */ 967 + ret = prueth_rx_alloc_zc(emac, count); 968 + 969 + if (xsk_uses_need_wakeup(rx_chn->xsk_pool)) { 970 + /* If the user space doesn't provide enough buffers then it must 971 + * explicitly wake up the kernel when new buffers are available 972 + */ 973 + if (ret < count) 974 + xsk_set_rx_need_wakeup(rx_chn->xsk_pool); 975 + else 976 + xsk_clear_rx_need_wakeup(rx_chn->xsk_pool); 977 + } 978 + 979 + return count; 836 980 } 837 981 838 982 static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) ··· 1057 849 /* firmware adds 4 CRC bytes, strip them */ 1058 850 pkt_len -= 4; 1059 851 cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); 1060 - 1061 852 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); 1062 853 1063 854 /* if allocation fails we drop the packet but push the ··· 1128 921 struct cppi5_host_desc_t *desc_rx; 1129 922 struct prueth_swdata *swdata; 1130 923 struct page_pool *pool; 924 + struct xdp_buff *xdp; 1131 925 struct page *page; 1132 926 1133 927 pool = rx_chn->pg_pool; 1134 928 desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); 1135 929 swdata = cppi5_hdesc_get_swdata(desc_rx); 1136 - if (swdata->type == PRUETH_SWDATA_PAGE) { 930 + if (rx_chn->xsk_pool) { 931 + xdp = swdata->data.xdp; 932 + xsk_buff_free(xdp); 933 + } else { 1137 934 page = swdata->data.page; 1138 935 page_pool_recycle_direct(pool, page); 1139 936 } ··· 1385 1174 { 1386 1175 struct prueth_emac *emac = dev_id; 1387 1176 1177 + emac->rx_chns.irq_disabled = true; 1388 1178 disable_irq_nosync(irq); 1389 1179 napi_schedule(&emac->napi_rx); 1390 1180 ··· 1413 1201 PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA; 1414 1202 int flow = emac->is_sr1 ? 
1415 1203 PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS; 1204 + struct prueth_rx_chn *rx_chn = &emac->rx_chns; 1416 1205 int xdp_state_or = 0; 1417 1206 int num_rx = 0; 1418 1207 int cur_budget; ··· 1421 1208 int ret; 1422 1209 1423 1210 while (flow--) { 1424 - cur_budget = budget - num_rx; 1211 + if (rx_chn->xsk_pool) { 1212 + num_rx = emac_rx_packet_zc(emac, flow, budget); 1213 + } else { 1214 + cur_budget = budget - num_rx; 1425 1215 1426 - while (cur_budget--) { 1427 - ret = emac_rx_packet(emac, flow, &xdp_state); 1428 - xdp_state_or |= xdp_state; 1429 - if (ret) 1430 - break; 1431 - num_rx++; 1216 + while (cur_budget--) { 1217 + ret = emac_rx_packet(emac, flow, &xdp_state); 1218 + xdp_state_or |= xdp_state; 1219 + if (ret) 1220 + break; 1221 + num_rx++; 1222 + } 1432 1223 } 1433 1224 1434 1225 if (num_rx >= budget) ··· 1448 1231 ns_to_ktime(emac->rx_pace_timeout_ns), 1449 1232 HRTIMER_MODE_REL_PINNED); 1450 1233 } else { 1451 - enable_irq(emac->rx_chns.irq[rx_flow]); 1234 + if (emac->rx_chns.irq_disabled) { 1235 + /* re-enable the RX IRQ */ 1236 + emac->rx_chns.irq_disabled = false; 1237 + enable_irq(emac->rx_chns.irq[rx_flow]); 1238 + } 1452 1239 } 1453 1240 } 1454 1241 ··· 1460 1239 } 1461 1240 EXPORT_SYMBOL_GPL(icssg_napi_rx_poll); 1462 1241 1463 - static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac, 1464 - struct device *dma_dev, 1465 - int size) 1466 - { 1467 - struct page_pool_params pp_params = { 0 }; 1468 - struct page_pool *pool; 1469 - 1470 - pp_params.order = 0; 1471 - pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 1472 - pp_params.pool_size = size; 1473 - pp_params.nid = dev_to_node(emac->prueth->dev); 1474 - pp_params.dma_dir = DMA_BIDIRECTIONAL; 1475 - pp_params.dev = dma_dev; 1476 - pp_params.napi = &emac->napi_rx; 1477 - pp_params.max_len = PAGE_SIZE; 1478 - 1479 - pool = page_pool_create(&pp_params); 1480 - if (IS_ERR(pool)) 1481 - netdev_err(emac->ndev, "cannot create rx page pool\n"); 1482 - 1483 - return 
pool; 1484 - } 1485 - 1486 1242 int prueth_prepare_rx_chan(struct prueth_emac *emac, 1487 1243 struct prueth_rx_chn *chn, 1488 1244 int buf_size) 1489 1245 { 1490 - struct page_pool *pool; 1491 1246 struct page *page; 1247 + int desc_avail; 1492 1248 int i, ret; 1493 1249 1494 - pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num); 1495 - if (IS_ERR(pool)) 1496 - return PTR_ERR(pool); 1250 + desc_avail = k3_cppi_desc_pool_avail(chn->desc_pool); 1251 + if (desc_avail < chn->descs_num) 1252 + netdev_warn(emac->ndev, 1253 + "not enough RX descriptors available %d < %d\n", 1254 + desc_avail, chn->descs_num); 1497 1255 1498 - chn->pg_pool = pool; 1499 - 1500 - for (i = 0; i < chn->descs_num; i++) { 1501 - /* NOTE: we're not using memory efficiently here. 1502 - * 1 full page (4KB?) used here instead of 1503 - * PRUETH_MAX_PKT_SIZE (~1.5KB?) 1256 + if (chn->xsk_pool) { 1257 + /* get pages from xsk_pool and push to RX ring 1258 + * queue as much as possible 1504 1259 */ 1505 - page = page_pool_dev_alloc_pages(pool); 1506 - if (!page) { 1507 - netdev_err(emac->ndev, "couldn't allocate rx page\n"); 1508 - ret = -ENOMEM; 1260 + ret = prueth_rx_alloc_zc(emac, desc_avail); 1261 + if (!ret) 1509 1262 goto recycle_alloc_pg; 1510 - } 1263 + } else { 1264 + for (i = 0; i < desc_avail; i++) { 1265 + /* NOTE: we're not using memory efficiently here. 1266 + * 1 full page (4KB?) used here instead of 1267 + * PRUETH_MAX_PKT_SIZE (~1.5KB?) 
1268 + */ 1269 + page = page_pool_dev_alloc_pages(chn->pg_pool); 1270 + if (!page) { 1271 + netdev_err(emac->ndev, "couldn't allocate rx page\n"); 1272 + ret = -ENOMEM; 1273 + goto recycle_alloc_pg; 1274 + } 1511 1275 1512 - ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size); 1513 - if (ret < 0) { 1514 - netdev_err(emac->ndev, 1515 - "cannot submit page for rx chan %s ret %d\n", 1516 - chn->name, ret); 1517 - page_pool_recycle_direct(pool, page); 1518 - goto recycle_alloc_pg; 1276 + ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size); 1277 + if (ret < 0) { 1278 + netdev_err(emac->ndev, 1279 + "cannot submit page for rx chan %s ret %d\n", 1280 + chn->name, ret); 1281 + page_pool_recycle_direct(chn->pg_pool, page); 1282 + goto recycle_alloc_pg; 1283 + } 1519 1284 } 1520 1285 } 1521 1286
+46 -14
drivers/net/ethernet/ti/icssg/icssg_prueth.c
··· 395 395 container_of(timer, struct prueth_emac, rx_hrtimer); 396 396 int rx_flow = PRUETH_RX_FLOW_DATA; 397 397 398 - enable_irq(emac->rx_chns.irq[rx_flow]); 398 + if (emac->rx_chns.irq_disabled) { 399 + /* re-enable the RX IRQ */ 400 + emac->rx_chns.irq_disabled = false; 401 + enable_irq(emac->rx_chns.irq[rx_flow]); 402 + } 399 403 return HRTIMER_NORESTART; 400 404 } 401 405 ··· 573 569 .perout_enable = prueth_perout_enable, 574 570 }; 575 571 572 + static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac) 573 + { 574 + struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq; 575 + 576 + if (xdp_rxq_info_is_reg(rxq)) 577 + xdp_rxq_info_unreg(rxq); 578 + } 579 + 576 580 static int prueth_create_xdp_rxqs(struct prueth_emac *emac) 577 581 { 578 582 struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq; 579 583 struct page_pool *pool = emac->rx_chns.pg_pool; 584 + struct prueth_rx_chn *rx_chn = &emac->rx_chns; 580 585 int ret; 581 586 582 587 ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id); 583 588 if (ret) 584 589 return ret; 585 590 586 - ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); 587 - if (ret) 588 - xdp_rxq_info_unreg(rxq); 591 + if (rx_chn->xsk_pool) { 592 + ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_XSK_BUFF_POOL, NULL); 593 + if (ret) 594 + goto xdp_unreg; 595 + xsk_pool_set_rxq_info(rx_chn->xsk_pool, rxq); 596 + } else { 597 + ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); 598 + if (ret) 599 + goto xdp_unreg; 600 + } 589 601 602 + return 0; 603 + 604 + xdp_unreg: 605 + prueth_destroy_xdp_rxqs(emac); 590 606 return ret; 591 - } 592 - 593 - static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac) 594 - { 595 - struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq; 596 - 597 - if (!xdp_rxq_info_is_reg(rxq)) 598 - return; 599 - 600 - xdp_rxq_info_unreg(rxq); 601 607 } 602 608 603 609 static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr) ··· 1363 1349 { 1364 1350 struct prueth_emac 
*emac = netdev_priv(ndev); 1365 1351 struct prueth_tx_chn *tx_chn = &emac->tx_chns[qid]; 1352 + struct prueth_rx_chn *rx_chn = &emac->rx_chns; 1353 + 1354 + if (emac->xsk_qid != qid) { 1355 + netdev_err(ndev, "XSK queue %d not registered\n", qid); 1356 + return -EINVAL; 1357 + } 1366 1358 1367 1359 if (qid >= PRUETH_MAX_RX_FLOWS || qid >= emac->tx_ch_num) { 1368 1360 netdev_err(ndev, "Invalid XSK queue ID %d\n", qid); ··· 1380 1360 return -EINVAL; 1381 1361 } 1382 1362 1363 + if (!rx_chn->xsk_pool) { 1364 + netdev_err(ndev, "XSK pool not registered for RX queue %d\n", qid); 1365 + return -EINVAL; 1366 + } 1367 + 1383 1368 if (flags & XDP_WAKEUP_TX) { 1384 1369 if (!napi_if_scheduled_mark_missed(&tx_chn->napi_tx)) { 1385 1370 if (likely(napi_schedule_prep(&tx_chn->napi_tx))) 1386 1371 __napi_schedule(&tx_chn->napi_tx); 1372 + } 1373 + } 1374 + 1375 + if (flags & XDP_WAKEUP_RX) { 1376 + if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) { 1377 + if (likely(napi_schedule_prep(&emac->napi_rx))) 1378 + __napi_schedule(&emac->napi_rx); 1387 1379 } 1388 1380 } 1389 1381
+2
drivers/net/ethernet/ti/icssg/icssg_prueth.h
··· 143 143 struct page_pool *pg_pool; 144 144 struct xdp_rxq_info xdp_rxq; 145 145 struct xsk_buff_pool *xsk_pool; 146 + bool irq_disabled; 146 147 }; 147 148 148 149 enum prueth_swdata_type { ··· 167 166 struct page *page; 168 167 u32 cmd; 169 168 struct xdp_frame *xdpf; 169 + struct xdp_buff *xdp; 170 170 } data; 171 171 }; 172 172