Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: idxd: convert sprintf() to sysfs_emit() for all usages

Convert sprintf() to sysfs_emit() in order to check buffer overrun on sysfs
outputs.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/161894440044.3202472.13926639619695319753.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

Authored by Dave Jiang and committed by Vinod Koul.
8241571f eb15e715

+55 -61
drivers/dma/idxd/sysfs.c
··· 350 350 container_of(dev, struct idxd_engine, conf_dev); 351 351 352 352 if (engine->group) 353 - return sprintf(buf, "%d\n", engine->group->id); 353 + return sysfs_emit(buf, "%d\n", engine->group->id); 354 354 else 355 - return sprintf(buf, "%d\n", -1); 355 + return sysfs_emit(buf, "%d\n", -1); 356 356 } 357 357 358 358 static ssize_t engine_group_id_store(struct device *dev, ··· 447 447 struct idxd_group *group = 448 448 container_of(dev, struct idxd_group, conf_dev); 449 449 450 - return sprintf(buf, "%u\n", group->tokens_reserved); 450 + return sysfs_emit(buf, "%u\n", group->tokens_reserved); 451 451 } 452 452 453 453 static ssize_t group_tokens_reserved_store(struct device *dev, ··· 495 495 struct idxd_group *group = 496 496 container_of(dev, struct idxd_group, conf_dev); 497 497 498 - return sprintf(buf, "%u\n", group->tokens_allowed); 498 + return sysfs_emit(buf, "%u\n", group->tokens_allowed); 499 499 } 500 500 501 501 static ssize_t group_tokens_allowed_store(struct device *dev, ··· 540 540 struct idxd_group *group = 541 541 container_of(dev, struct idxd_group, conf_dev); 542 542 543 - return sprintf(buf, "%u\n", group->use_token_limit); 543 + return sysfs_emit(buf, "%u\n", group->use_token_limit); 544 544 } 545 545 546 546 static ssize_t group_use_token_limit_store(struct device *dev, ··· 583 583 struct idxd_group *group = 584 584 container_of(dev, struct idxd_group, conf_dev); 585 585 int i, rc = 0; 586 - char *tmp = buf; 587 586 struct idxd_device *idxd = group->idxd; 588 587 589 588 for (i = 0; i < idxd->max_engines; i++) { ··· 592 593 continue; 593 594 594 595 if (engine->group->id == group->id) 595 - rc += sprintf(tmp + rc, "engine%d.%d ", 596 - idxd->id, engine->id); 596 + rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id); 597 597 } 598 598 599 + if (!rc) 600 + return 0; 599 601 rc--; 600 - rc += sprintf(tmp + rc, "\n"); 602 + rc += sysfs_emit_at(buf, rc, "\n"); 601 603 602 604 return rc; 603 605 } ··· 612 612 struct idxd_group 
*group = 613 613 container_of(dev, struct idxd_group, conf_dev); 614 614 int i, rc = 0; 615 - char *tmp = buf; 616 615 struct idxd_device *idxd = group->idxd; 617 616 618 617 for (i = 0; i < idxd->max_wqs; i++) { ··· 621 622 continue; 622 623 623 624 if (wq->group->id == group->id) 624 - rc += sprintf(tmp + rc, "wq%d.%d ", 625 - idxd->id, wq->id); 625 + rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id); 626 626 } 627 627 628 + if (!rc) 629 + return 0; 628 630 rc--; 629 - rc += sprintf(tmp + rc, "\n"); 631 + rc += sysfs_emit_at(buf, rc, "\n"); 630 632 631 633 return rc; 632 634 } ··· 642 642 struct idxd_group *group = 643 643 container_of(dev, struct idxd_group, conf_dev); 644 644 645 - return sprintf(buf, "%d\n", group->tc_a); 645 + return sysfs_emit(buf, "%d\n", group->tc_a); 646 646 } 647 647 648 648 static ssize_t group_traffic_class_a_store(struct device *dev, ··· 683 683 struct idxd_group *group = 684 684 container_of(dev, struct idxd_group, conf_dev); 685 685 686 - return sprintf(buf, "%d\n", group->tc_b); 686 + return sysfs_emit(buf, "%d\n", group->tc_b); 687 687 } 688 688 689 689 static ssize_t group_traffic_class_b_store(struct device *dev, ··· 756 756 { 757 757 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 758 758 759 - return sprintf(buf, "%d\n", wq->client_count); 759 + return sysfs_emit(buf, "%d\n", wq->client_count); 760 760 } 761 761 762 762 static struct device_attribute dev_attr_wq_clients = ··· 769 769 770 770 switch (wq->state) { 771 771 case IDXD_WQ_DISABLED: 772 - return sprintf(buf, "disabled\n"); 772 + return sysfs_emit(buf, "disabled\n"); 773 773 case IDXD_WQ_ENABLED: 774 - return sprintf(buf, "enabled\n"); 774 + return sysfs_emit(buf, "enabled\n"); 775 775 } 776 776 777 - return sprintf(buf, "unknown\n"); 777 + return sysfs_emit(buf, "unknown\n"); 778 778 } 779 779 780 780 static struct device_attribute dev_attr_wq_state = ··· 786 786 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 787 787 788 
788 if (wq->group) 789 - return sprintf(buf, "%u\n", wq->group->id); 789 + return sysfs_emit(buf, "%u\n", wq->group->id); 790 790 else 791 - return sprintf(buf, "-1\n"); 791 + return sysfs_emit(buf, "-1\n"); 792 792 } 793 793 794 794 static ssize_t wq_group_id_store(struct device *dev, ··· 840 840 { 841 841 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 842 842 843 - return sprintf(buf, "%s\n", 844 - wq_dedicated(wq) ? "dedicated" : "shared"); 843 + return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared"); 845 844 } 846 845 847 846 static ssize_t wq_mode_store(struct device *dev, ··· 876 877 { 877 878 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 878 879 879 - return sprintf(buf, "%u\n", wq->size); 880 + return sysfs_emit(buf, "%u\n", wq->size); 880 881 } 881 882 882 883 static int total_claimed_wq_size(struct idxd_device *idxd) ··· 927 928 { 928 929 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 929 930 930 - return sprintf(buf, "%u\n", wq->priority); 931 + return sysfs_emit(buf, "%u\n", wq->priority); 931 932 } 932 933 933 934 static ssize_t wq_priority_store(struct device *dev, ··· 964 965 { 965 966 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 966 967 967 - return sprintf(buf, "%u\n", 968 - test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags)); 968 + return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags)); 969 969 } 970 970 971 971 static ssize_t wq_block_on_fault_store(struct device *dev, ··· 1003 1005 { 1004 1006 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1005 1007 1006 - return sprintf(buf, "%u\n", wq->threshold); 1008 + return sysfs_emit(buf, "%u\n", wq->threshold); 1007 1009 } 1008 1010 1009 1011 static ssize_t wq_threshold_store(struct device *dev, ··· 1046 1048 1047 1049 switch (wq->type) { 1048 1050 case IDXD_WQT_KERNEL: 1049 - return sprintf(buf, "%s\n", 1050 - idxd_wq_type_names[IDXD_WQT_KERNEL]); 1051 + return 
sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]); 1051 1052 case IDXD_WQT_USER: 1052 - return sprintf(buf, "%s\n", 1053 - idxd_wq_type_names[IDXD_WQT_USER]); 1053 + return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]); 1054 1054 case IDXD_WQT_NONE: 1055 1055 default: 1056 - return sprintf(buf, "%s\n", 1057 - idxd_wq_type_names[IDXD_WQT_NONE]); 1056 + return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]); 1058 1057 } 1059 1058 1060 1059 return -EINVAL; ··· 1092 1097 { 1093 1098 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1094 1099 1095 - return sprintf(buf, "%s\n", wq->name); 1100 + return sysfs_emit(buf, "%s\n", wq->name); 1096 1101 } 1097 1102 1098 1103 static ssize_t wq_name_store(struct device *dev, ··· 1162 1167 { 1163 1168 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1164 1169 1165 - return sprintf(buf, "%llu\n", wq->max_xfer_bytes); 1170 + return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes); 1166 1171 } 1167 1172 1168 1173 static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr, ··· 1196 1201 { 1197 1202 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1198 1203 1199 - return sprintf(buf, "%u\n", wq->max_batch_size); 1204 + return sysfs_emit(buf, "%u\n", wq->max_batch_size); 1200 1205 } 1201 1206 1202 1207 static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr, ··· 1229 1234 { 1230 1235 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); 1231 1236 1232 - return sprintf(buf, "%u\n", wq->ats_dis); 1237 + return sysfs_emit(buf, "%u\n", wq->ats_dis); 1233 1238 } 1234 1239 1235 1240 static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr, ··· 1306 1311 struct idxd_device *idxd = 1307 1312 container_of(dev, struct idxd_device, conf_dev); 1308 1313 1309 - return sprintf(buf, "%#x\n", idxd->hw.version); 1314 + return sysfs_emit(buf, "%#x\n", 
idxd->hw.version); 1310 1315 } 1311 1316 static DEVICE_ATTR_RO(version); 1312 1317 ··· 1317 1322 struct idxd_device *idxd = 1318 1323 container_of(dev, struct idxd_device, conf_dev); 1319 1324 1320 - return sprintf(buf, "%u\n", idxd->max_wq_size); 1325 + return sysfs_emit(buf, "%u\n", idxd->max_wq_size); 1321 1326 } 1322 1327 static DEVICE_ATTR_RO(max_work_queues_size); 1323 1328 ··· 1327 1332 struct idxd_device *idxd = 1328 1333 container_of(dev, struct idxd_device, conf_dev); 1329 1334 1330 - return sprintf(buf, "%u\n", idxd->max_groups); 1335 + return sysfs_emit(buf, "%u\n", idxd->max_groups); 1331 1336 } 1332 1337 static DEVICE_ATTR_RO(max_groups); 1333 1338 ··· 1337 1342 struct idxd_device *idxd = 1338 1343 container_of(dev, struct idxd_device, conf_dev); 1339 1344 1340 - return sprintf(buf, "%u\n", idxd->max_wqs); 1345 + return sysfs_emit(buf, "%u\n", idxd->max_wqs); 1341 1346 } 1342 1347 static DEVICE_ATTR_RO(max_work_queues); 1343 1348 ··· 1347 1352 struct idxd_device *idxd = 1348 1353 container_of(dev, struct idxd_device, conf_dev); 1349 1354 1350 - return sprintf(buf, "%u\n", idxd->max_engines); 1355 + return sysfs_emit(buf, "%u\n", idxd->max_engines); 1351 1356 } 1352 1357 static DEVICE_ATTR_RO(max_engines); 1353 1358 ··· 1357 1362 struct idxd_device *idxd = 1358 1363 container_of(dev, struct idxd_device, conf_dev); 1359 1364 1360 - return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); 1365 + return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); 1361 1366 } 1362 1367 static DEVICE_ATTR_RO(numa_node); 1363 1368 ··· 1367 1372 struct idxd_device *idxd = 1368 1373 container_of(dev, struct idxd_device, conf_dev); 1369 1374 1370 - return sprintf(buf, "%u\n", idxd->max_batch_size); 1375 + return sysfs_emit(buf, "%u\n", idxd->max_batch_size); 1371 1376 } 1372 1377 static DEVICE_ATTR_RO(max_batch_size); 1373 1378 ··· 1378 1383 struct idxd_device *idxd = 1379 1384 container_of(dev, struct idxd_device, conf_dev); 1380 1385 1381 - return sprintf(buf, 
"%llu\n", idxd->max_xfer_bytes); 1386 + return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes); 1382 1387 } 1383 1388 static DEVICE_ATTR_RO(max_transfer_size); 1384 1389 ··· 1404 1409 struct idxd_device *idxd = 1405 1410 container_of(dev, struct idxd_device, conf_dev); 1406 1411 1407 - return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits); 1412 + return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits); 1408 1413 } 1409 1414 static DEVICE_ATTR_RO(gen_cap); 1410 1415 ··· 1414 1419 struct idxd_device *idxd = 1415 1420 container_of(dev, struct idxd_device, conf_dev); 1416 1421 1417 - return sprintf(buf, "%u\n", 1418 - test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); 1422 + return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); 1419 1423 } 1420 1424 static DEVICE_ATTR_RO(configurable); 1421 1425 ··· 1434 1440 } 1435 1441 spin_unlock_irqrestore(&idxd->dev_lock, flags); 1436 1442 1437 - return sprintf(buf, "%d\n", count); 1443 + return sysfs_emit(buf, "%d\n", count); 1438 1444 } 1439 1445 static DEVICE_ATTR_RO(clients); 1440 1446 ··· 1444 1450 struct idxd_device *idxd = 1445 1451 container_of(dev, struct idxd_device, conf_dev); 1446 1452 1447 - return sprintf(buf, "%u\n", device_pasid_enabled(idxd)); 1453 + return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd)); 1448 1454 } 1449 1455 static DEVICE_ATTR_RO(pasid_enabled); 1450 1456 ··· 1457 1463 switch (idxd->state) { 1458 1464 case IDXD_DEV_DISABLED: 1459 1465 case IDXD_DEV_CONF_READY: 1460 - return sprintf(buf, "disabled\n"); 1466 + return sysfs_emit(buf, "disabled\n"); 1461 1467 case IDXD_DEV_ENABLED: 1462 - return sprintf(buf, "enabled\n"); 1468 + return sysfs_emit(buf, "enabled\n"); 1463 1469 case IDXD_DEV_HALTED: 1464 - return sprintf(buf, "halted\n"); 1470 + return sysfs_emit(buf, "halted\n"); 1465 1471 } 1466 1472 1467 - return sprintf(buf, "unknown\n"); 1473 + return sysfs_emit(buf, "unknown\n"); 1468 1474 } 1469 1475 static DEVICE_ATTR_RO(state); 1470 1476 ··· 1478 1484 1479 1485 
spin_lock_irqsave(&idxd->dev_lock, flags); 1480 1486 for (i = 0; i < 4; i++) 1481 - out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]); 1487 + out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]); 1482 1488 spin_unlock_irqrestore(&idxd->dev_lock, flags); 1483 1489 out--; 1484 - out += sprintf(buf + out, "\n"); 1490 + out += sysfs_emit_at(buf, out, "\n"); 1485 1491 return out; 1486 1492 } 1487 1493 static DEVICE_ATTR_RO(errors); ··· 1492 1498 struct idxd_device *idxd = 1493 1499 container_of(dev, struct idxd_device, conf_dev); 1494 1500 1495 - return sprintf(buf, "%u\n", idxd->max_tokens); 1501 + return sysfs_emit(buf, "%u\n", idxd->max_tokens); 1496 1502 } 1497 1503 static DEVICE_ATTR_RO(max_tokens); 1498 1504 ··· 1502 1508 struct idxd_device *idxd = 1503 1509 container_of(dev, struct idxd_device, conf_dev); 1504 1510 1505 - return sprintf(buf, "%u\n", idxd->token_limit); 1511 + return sysfs_emit(buf, "%u\n", idxd->token_limit); 1506 1512 } 1507 1513 1508 1514 static ssize_t token_limit_store(struct device *dev, ··· 1541 1547 struct idxd_device *idxd = 1542 1548 container_of(dev, struct idxd_device, conf_dev); 1543 1549 1544 - return sprintf(buf, "%u\n", idxd->major); 1550 + return sysfs_emit(buf, "%u\n", idxd->major); 1545 1551 } 1546 1552 static DEVICE_ATTR_RO(cdev_major); 1547 1553 ··· 1550 1556 { 1551 1557 struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); 1552 1558 1553 - return sprintf(buf, "%#x\n", idxd->cmd_status); 1559 + return sysfs_emit(buf, "%#x\n", idxd->cmd_status); 1554 1560 } 1555 1561 static DEVICE_ATTR_RO(cmd_status); 1556 1562