Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

pktcdvd: Get rid of custom printing macros

We can use the traditional dev_*() macros instead of the custom
printing macros provided by the driver.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Link: https://lore.kernel.org/r/20230310164549.22133-2-andriy.shevchenko@linux.intel.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Andy Shevchenko and committed by Jens Axboe
3a41db53 1341c7d2

+130 -119
+129 -118
drivers/block/pktcdvd.c
··· 72 72 73 73 #define DRIVER_NAME "pktcdvd" 74 74 75 - #define pkt_err(pd, fmt, ...) \ 76 - pr_err("%s: " fmt, pd->name, ##__VA_ARGS__) 77 - #define pkt_notice(pd, fmt, ...) \ 78 - pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__) 79 - #define pkt_info(pd, fmt, ...) \ 80 - pr_info("%s: " fmt, pd->name, ##__VA_ARGS__) 81 - 82 - #define pkt_dbg(level, pd, fmt, ...) \ 83 - do { \ 84 - if (level == 2 && PACKET_DEBUG >= 2) \ 85 - pr_notice("%s: %s():" fmt, \ 86 - pd->name, __func__, ##__VA_ARGS__); \ 87 - else if (level == 1 && PACKET_DEBUG >= 1) \ 88 - pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \ 89 - } while (0) 90 - 91 75 #define MAX_SPEED 0xffff 92 76 93 77 static DEFINE_MUTEX(pktcdvd_mutex); ··· 303 319 if (class_is_registered(&class_pktcdvd)) { 304 320 pd->dev = device_create_with_groups(&class_pktcdvd, NULL, 305 321 MKDEV(0, 0), pd, pkt_groups, 306 - "%s", pd->name); 322 + "%s", pd->disk->disk_name); 307 323 if (IS_ERR(pd->dev)) 308 324 pd->dev = NULL; 309 325 } ··· 334 350 if (!pd) 335 351 continue; 336 352 n += sprintf(data+n, "%s %u:%u %u:%u\n", 337 - pd->name, 353 + pd->disk->disk_name, 338 354 MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev), 339 355 MAJOR(pd->bdev->bd_dev), 340 356 MINOR(pd->bdev->bd_dev)); ··· 434 450 { 435 451 if (!pkt_debugfs_root) 436 452 return; 437 - pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root); 453 + pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root); 438 454 if (!pd->dfs_d_root) 439 455 return; 440 456 ··· 468 484 469 485 static void pkt_bio_finished(struct pktcdvd_device *pd) 470 486 { 487 + struct device *ddev = disk_to_dev(pd->disk); 488 + 471 489 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0); 472 490 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) { 473 - pkt_dbg(2, pd, "queue empty\n"); 491 + dev_dbg(ddev, "queue empty\n"); 474 492 atomic_set(&pd->iosched.attention, 1); 475 493 wake_up(&pd->wqueue); 476 494 } ··· 703 717 static void pkt_dump_sense(struct pktcdvd_device *pd, 704 718 
struct packet_command *cgc) 705 719 { 720 + struct device *ddev = disk_to_dev(pd->disk); 706 721 struct scsi_sense_hdr *sshdr = cgc->sshdr; 707 722 708 723 if (sshdr) 709 - pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n", 724 + dev_err(ddev, "%*ph - sense %02x.%02x.%02x (%s)\n", 710 725 CDROM_PACKET_SIZE, cgc->cmd, 711 726 sshdr->sense_key, sshdr->asc, sshdr->ascq, 712 727 sense_key_string(sshdr->sense_key)); 713 728 else 714 - pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd); 729 + dev_err(ddev, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd); 715 730 } 716 731 717 732 /* ··· 796 809 */ 797 810 static void pkt_iosched_process_queue(struct pktcdvd_device *pd) 798 811 { 812 + struct device *ddev = disk_to_dev(pd->disk); 799 813 800 814 if (atomic_read(&pd->iosched.attention) == 0) 801 815 return; ··· 824 836 need_write_seek = 0; 825 837 if (need_write_seek && reads_queued) { 826 838 if (atomic_read(&pd->cdrw.pending_bios) > 0) { 827 - pkt_dbg(2, pd, "write, waiting\n"); 839 + dev_dbg(ddev, "write, waiting\n"); 828 840 break; 829 841 } 830 842 pkt_flush_cache(pd); ··· 833 845 } else { 834 846 if (!reads_queued && writes_queued) { 835 847 if (atomic_read(&pd->cdrw.pending_bios) > 0) { 836 - pkt_dbg(2, pd, "read, waiting\n"); 848 + dev_dbg(ddev, "read, waiting\n"); 837 849 break; 838 850 } 839 851 pd->iosched.writing = 1; ··· 880 892 */ 881 893 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) 882 894 { 895 + struct device *ddev = disk_to_dev(pd->disk); 896 + 883 897 if ((pd->settings.size << 9) / CD_FRAMESIZE 884 898 <= queue_max_segments(q)) { 885 899 /* ··· 898 908 set_bit(PACKET_MERGE_SEGS, &pd->flags); 899 909 return 0; 900 910 } else { 901 - pkt_err(pd, "cdrom max_phys_segments too small\n"); 911 + dev_err(ddev, "cdrom max_phys_segments too small\n"); 902 912 return -EIO; 903 913 } 904 914 } ··· 909 919 struct pktcdvd_device *pd = pkt->pd; 910 920 BUG_ON(!pd); 911 921 912 - pkt_dbg(2, pd, "bio=%p sec0=%llx 
sec=%llx err=%d\n", 922 + dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n", 913 923 bio, (unsigned long long)pkt->sector, 914 924 (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status); 915 925 ··· 929 939 struct pktcdvd_device *pd = pkt->pd; 930 940 BUG_ON(!pd); 931 941 932 - pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status); 942 + dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status); 933 943 934 944 pd->stats.pkt_ended++; 935 945 ··· 945 955 */ 946 956 static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) 947 957 { 958 + struct device *ddev = disk_to_dev(pd->disk); 948 959 int frames_read = 0; 949 960 struct bio *bio; 950 961 int f; ··· 974 983 spin_unlock(&pkt->lock); 975 984 976 985 if (pkt->cache_valid) { 977 - pkt_dbg(2, pd, "zone %llx cached\n", 978 - (unsigned long long)pkt->sector); 986 + dev_dbg(ddev, "zone %llx cached\n", (unsigned long long)pkt->sector); 979 987 goto out_account; 980 988 } 981 989 ··· 995 1005 996 1006 p = (f * CD_FRAMESIZE) / PAGE_SIZE; 997 1007 offset = (f * CD_FRAMESIZE) % PAGE_SIZE; 998 - pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n", 999 - f, pkt->pages[p], offset); 1008 + dev_dbg(ddev, "Adding frame %d, page:%p offs:%d\n", f, 1009 + pkt->pages[p], offset); 1000 1010 if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset)) 1001 1011 BUG(); 1002 1012 ··· 1006 1016 } 1007 1017 1008 1018 out_account: 1009 - pkt_dbg(2, pd, "need %d frames for zone %llx\n", 1010 - frames_read, (unsigned long long)pkt->sector); 1019 + dev_dbg(ddev, "need %d frames for zone %llx\n", frames_read, 1020 + (unsigned long long)pkt->sector); 1011 1021 pd->stats.pkt_started++; 1012 1022 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9); 1013 1023 } ··· 1041 1051 } 1042 1052 } 1043 1053 1044 - static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state) 1054 + static inline void pkt_set_state(struct device *ddev, struct packet_data *pkt, 1055 
+ enum packet_data_state state) 1045 1056 { 1046 - #if PACKET_DEBUG > 1 1047 1057 static const char *state_name[] = { 1048 1058 "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED" 1049 1059 }; 1050 1060 enum packet_data_state old_state = pkt->state; 1051 - pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n", 1061 + 1062 + dev_dbg(ddev, "pkt %2d : s=%6llx %s -> %s\n", 1052 1063 pkt->id, (unsigned long long)pkt->sector, 1053 1064 state_name[old_state], state_name[state]); 1054 - #endif 1065 + 1055 1066 pkt->state = state; 1056 1067 } 1057 1068 ··· 1062 1071 */ 1063 1072 static int pkt_handle_queue(struct pktcdvd_device *pd) 1064 1073 { 1074 + struct device *ddev = disk_to_dev(pd->disk); 1065 1075 struct packet_data *pkt, *p; 1066 1076 struct bio *bio = NULL; 1067 1077 sector_t zone = 0; /* Suppress gcc warning */ ··· 1072 1080 atomic_set(&pd->scan_queue, 0); 1073 1081 1074 1082 if (list_empty(&pd->cdrw.pkt_free_list)) { 1075 - pkt_dbg(2, pd, "no pkt\n"); 1083 + dev_dbg(ddev, "no pkt\n"); 1076 1084 return 0; 1077 1085 } 1078 1086 ··· 1109 1117 } 1110 1118 spin_unlock(&pd->lock); 1111 1119 if (!bio) { 1112 - pkt_dbg(2, pd, "no bio\n"); 1120 + dev_dbg(ddev, "no bio\n"); 1113 1121 return 0; 1114 1122 } 1115 1123 ··· 1125 1133 * to this packet. 
1126 1134 */ 1127 1135 spin_lock(&pd->lock); 1128 - pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone); 1136 + dev_dbg(ddev, "looking for zone %llx\n", (unsigned long long)zone); 1129 1137 while ((node = pkt_rbtree_find(pd, zone)) != NULL) { 1138 + sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd); 1139 + 1130 1140 bio = node->bio; 1131 - pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long) 1132 - get_zone(bio->bi_iter.bi_sector, pd)); 1133 - if (get_zone(bio->bi_iter.bi_sector, pd) != zone) 1141 + dev_dbg(ddev, "found zone=%llx\n", (unsigned long long)tmp); 1142 + if (tmp != zone) 1134 1143 break; 1135 1144 pkt_rbtree_erase(pd, node); 1136 1145 spin_lock(&pkt->lock); ··· 1150 1157 spin_unlock(&pd->lock); 1151 1158 1152 1159 pkt->sleep_time = max(PACKET_WAIT_TIME, 1); 1153 - pkt_set_state(pkt, PACKET_WAITING_STATE); 1160 + pkt_set_state(ddev, pkt, PACKET_WAITING_STATE); 1154 1161 atomic_set(&pkt->run_sm, 1); 1155 1162 1156 1163 spin_lock(&pd->cdrw.active_list_lock); ··· 1202 1209 */ 1203 1210 static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) 1204 1211 { 1212 + struct device *ddev = disk_to_dev(pd->disk); 1205 1213 int f; 1206 1214 1207 1215 bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames, ··· 1219 1225 if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset)) 1220 1226 BUG(); 1221 1227 } 1222 - pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt); 1228 + dev_dbg(ddev, "vcnt=%d\n", pkt->w_bio->bi_vcnt); 1223 1229 1224 1230 /* 1225 1231 * Fill-in bvec with data from orig_bios. 
··· 1227 1233 spin_lock(&pkt->lock); 1228 1234 bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head); 1229 1235 1230 - pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE); 1236 + pkt_set_state(ddev, pkt, PACKET_WRITE_WAIT_STATE); 1231 1237 spin_unlock(&pkt->lock); 1232 1238 1233 - pkt_dbg(2, pd, "Writing %d frames for zone %llx\n", 1234 - pkt->write_size, (unsigned long long)pkt->sector); 1239 + dev_dbg(ddev, "Writing %d frames for zone %llx\n", pkt->write_size, 1240 + (unsigned long long)pkt->sector); 1235 1241 1236 1242 if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) 1237 1243 pkt->cache_valid = 1; ··· 1259 1265 1260 1266 static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) 1261 1267 { 1262 - pkt_dbg(2, pd, "pkt %d\n", pkt->id); 1268 + struct device *ddev = disk_to_dev(pd->disk); 1269 + 1270 + dev_dbg(ddev, "pkt %d\n", pkt->id); 1263 1271 1264 1272 for (;;) { 1265 1273 switch (pkt->state) { ··· 1271 1275 1272 1276 pkt->sleep_time = 0; 1273 1277 pkt_gather_data(pd, pkt); 1274 - pkt_set_state(pkt, PACKET_READ_WAIT_STATE); 1278 + pkt_set_state(ddev, pkt, PACKET_READ_WAIT_STATE); 1275 1279 break; 1276 1280 1277 1281 case PACKET_READ_WAIT_STATE: ··· 1279 1283 return; 1280 1284 1281 1285 if (atomic_read(&pkt->io_errors) > 0) { 1282 - pkt_set_state(pkt, PACKET_RECOVERY_STATE); 1286 + pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE); 1283 1287 } else { 1284 1288 pkt_start_write(pd, pkt); 1285 1289 } ··· 1290 1294 return; 1291 1295 1292 1296 if (!pkt->w_bio->bi_status) { 1293 - pkt_set_state(pkt, PACKET_FINISHED_STATE); 1297 + pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE); 1294 1298 } else { 1295 - pkt_set_state(pkt, PACKET_RECOVERY_STATE); 1299 + pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE); 1296 1300 } 1297 1301 break; 1298 1302 1299 1303 case PACKET_RECOVERY_STATE: 1300 - pkt_dbg(2, pd, "No recovery possible\n"); 1301 - pkt_set_state(pkt, PACKET_FINISHED_STATE); 1304 + dev_dbg(ddev, "No recovery possible\n"); 
1305 + pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE); 1302 1306 break; 1303 1307 1304 1308 case PACKET_FINISHED_STATE: ··· 1314 1318 1315 1319 static void pkt_handle_packets(struct pktcdvd_device *pd) 1316 1320 { 1321 + struct device *ddev = disk_to_dev(pd->disk); 1317 1322 struct packet_data *pkt, *next; 1318 1323 1319 1324 /* ··· 1335 1338 if (pkt->state == PACKET_FINISHED_STATE) { 1336 1339 list_del(&pkt->list); 1337 1340 pkt_put_packet_data(pd, pkt); 1338 - pkt_set_state(pkt, PACKET_IDLE_STATE); 1341 + pkt_set_state(ddev, pkt, PACKET_IDLE_STATE); 1339 1342 atomic_set(&pd->scan_queue, 1); 1340 1343 } 1341 1344 } ··· 1364 1367 static int kcdrwd(void *foobar) 1365 1368 { 1366 1369 struct pktcdvd_device *pd = foobar; 1370 + struct device *ddev = disk_to_dev(pd->disk); 1367 1371 struct packet_data *pkt; 1372 + int states[PACKET_NUM_STATES]; 1368 1373 long min_sleep_time, residue; 1369 1374 1370 1375 set_user_nice(current, MIN_NICE); ··· 1397 1398 goto work_to_do; 1398 1399 1399 1400 /* Otherwise, go to sleep */ 1400 - if (PACKET_DEBUG > 1) { 1401 - int states[PACKET_NUM_STATES]; 1402 - pkt_count_states(pd, states); 1403 - pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", 1404 - states[0], states[1], states[2], 1405 - states[3], states[4], states[5]); 1406 - } 1401 + pkt_count_states(pd, states); 1402 + dev_dbg(ddev, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", 1403 + states[0], states[1], states[2], states[3], states[4], states[5]); 1407 1404 1408 1405 min_sleep_time = MAX_SCHEDULE_TIMEOUT; 1409 1406 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { ··· 1407 1412 min_sleep_time = pkt->sleep_time; 1408 1413 } 1409 1414 1410 - pkt_dbg(2, pd, "sleeping\n"); 1415 + dev_dbg(ddev, "sleeping\n"); 1411 1416 residue = schedule_timeout(min_sleep_time); 1412 - pkt_dbg(2, pd, "wake up\n"); 1417 + dev_dbg(ddev, "wake up\n"); 1413 1418 1414 1419 /* make swsusp happy with our thread */ 1415 1420 try_to_freeze(); ··· 1457 1462 1458 1463 static void 
pkt_print_settings(struct pktcdvd_device *pd) 1459 1464 { 1460 - pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n", 1465 + dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n", 1461 1466 pd->settings.fp ? "Fixed" : "Variable", 1462 1467 pd->settings.size >> 2, 1463 1468 pd->settings.block_mode == 8 ? '1' : '2'); ··· 1585 1590 */ 1586 1591 static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) 1587 1592 { 1593 + struct device *ddev = disk_to_dev(pd->disk); 1588 1594 struct packet_command cgc; 1589 1595 struct scsi_sense_hdr sshdr; 1590 1596 write_param_page *wp; ··· 1652 1656 /* 1653 1657 * paranoia 1654 1658 */ 1655 - pkt_err(pd, "write mode wrong %d\n", wp->data_block_type); 1659 + dev_err(ddev, "write mode wrong %d\n", wp->data_block_type); 1656 1660 return 1; 1657 1661 } 1658 1662 wp->packet_size = cpu_to_be32(pd->settings.size >> 2); ··· 1673 1677 */ 1674 1678 static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti) 1675 1679 { 1680 + struct device *ddev = disk_to_dev(pd->disk); 1681 + 1676 1682 switch (pd->mmc3_profile) { 1677 1683 case 0x1a: /* DVD+RW */ 1678 1684 case 0x12: /* DVD-RAM */ ··· 1699 1701 if (ti->rt == 1 && ti->blank == 0) 1700 1702 return 1; 1701 1703 1702 - pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); 1704 + dev_err(ddev, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); 1703 1705 return 0; 1704 1706 } 1705 1707 ··· 1708 1710 */ 1709 1711 static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) 1710 1712 { 1713 + struct device *ddev = disk_to_dev(pd->disk); 1714 + 1711 1715 switch (pd->mmc3_profile) { 1712 1716 case 0x0a: /* CD-RW */ 1713 1717 case 0xffff: /* MMC3 not supported */ ··· 1719 1719 case 0x12: /* DVD-RAM */ 1720 1720 return 1; 1721 1721 default: 1722 - pkt_dbg(2, pd, "Wrong disc profile (%x)\n", 1723 - pd->mmc3_profile); 1722 + dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile); 1724 1723 return 0; 1725 
1724 } 1726 1725 ··· 1728 1729 * but i'm not sure, should we leave this to user apps? probably. 1729 1730 */ 1730 1731 if (di->disc_type == 0xff) { 1731 - pkt_notice(pd, "unknown disc - no track?\n"); 1732 + dev_notice(ddev, "unknown disc - no track?\n"); 1732 1733 return 0; 1733 1734 } 1734 1735 1735 1736 if (di->disc_type != 0x20 && di->disc_type != 0) { 1736 - pkt_err(pd, "wrong disc type (%x)\n", di->disc_type); 1737 + dev_err(ddev, "wrong disc type (%x)\n", di->disc_type); 1737 1738 return 0; 1738 1739 } 1739 1740 1740 1741 if (di->erasable == 0) { 1741 - pkt_notice(pd, "disc not erasable\n"); 1742 + dev_err(ddev, "disc not erasable\n"); 1742 1743 return 0; 1743 1744 } 1744 1745 1745 1746 if (di->border_status == PACKET_SESSION_RESERVED) { 1746 - pkt_err(pd, "can't write to last track (reserved)\n"); 1747 + dev_err(ddev, "can't write to last track (reserved)\n"); 1747 1748 return 0; 1748 1749 } 1749 1750 ··· 1752 1753 1753 1754 static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) 1754 1755 { 1756 + struct device *ddev = disk_to_dev(pd->disk); 1755 1757 struct packet_command cgc; 1756 1758 unsigned char buf[12]; 1757 1759 disc_information di; ··· 1770 1770 1771 1771 ret = pkt_get_disc_info(pd, &di); 1772 1772 if (ret) { 1773 - pkt_err(pd, "failed get_disc\n"); 1773 + dev_err(ddev, "failed get_disc\n"); 1774 1774 return ret; 1775 1775 } 1776 1776 ··· 1782 1782 track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */ 1783 1783 ret = pkt_get_track_info(pd, track, 1, &ti); 1784 1784 if (ret) { 1785 - pkt_err(pd, "failed get_track\n"); 1785 + dev_err(ddev, "failed get_track\n"); 1786 1786 return ret; 1787 1787 } 1788 1788 1789 1789 if (!pkt_writable_track(pd, &ti)) { 1790 - pkt_err(pd, "can't write to this track\n"); 1790 + dev_err(ddev, "can't write to this track\n"); 1791 1791 return -EROFS; 1792 1792 } 1793 1793 ··· 1797 1797 */ 1798 1798 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; 1799 1799 if (pd->settings.size == 0) 
{ 1800 - pkt_notice(pd, "detected zero packet size!\n"); 1800 + dev_notice(ddev, "detected zero packet size!\n"); 1801 1801 return -ENXIO; 1802 1802 } 1803 1803 if (pd->settings.size > PACKET_MAX_SECTORS) { 1804 - pkt_err(pd, "packet size is too big\n"); 1804 + dev_err(ddev, "packet size is too big\n"); 1805 1805 return -EROFS; 1806 1806 } 1807 1807 pd->settings.fp = ti.fp; ··· 1843 1843 pd->settings.block_mode = PACKET_BLOCK_MODE2; 1844 1844 break; 1845 1845 default: 1846 - pkt_err(pd, "unknown data mode\n"); 1846 + dev_err(ddev, "unknown data mode\n"); 1847 1847 return -EROFS; 1848 1848 } 1849 1849 return 0; ··· 1854 1854 */ 1855 1855 static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd) 1856 1856 { 1857 + struct device *ddev = disk_to_dev(pd->disk); 1857 1858 struct packet_command cgc; 1858 1859 struct scsi_sense_hdr sshdr; 1859 1860 unsigned char buf[64]; ··· 1884 1883 cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff)); 1885 1884 ret = pkt_mode_select(pd, &cgc); 1886 1885 if (ret) { 1887 - pkt_err(pd, "write caching control failed\n"); 1886 + dev_err(ddev, "write caching control failed\n"); 1888 1887 pkt_dump_sense(pd, &cgc); 1889 1888 } else if (!ret && set) 1890 - pkt_notice(pd, "enabled write caching\n"); 1889 + dev_notice(ddev, "enabled write caching\n"); 1891 1890 return ret; 1892 1891 } 1893 1892 ··· 1968 1967 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, 1969 1968 unsigned *speed) 1970 1969 { 1970 + struct device *ddev = disk_to_dev(pd->disk); 1971 1971 struct packet_command cgc; 1972 1972 struct scsi_sense_hdr sshdr; 1973 1973 unsigned char buf[64]; ··· 2003 2001 } 2004 2002 2005 2003 if (!(buf[6] & 0x40)) { 2006 - pkt_notice(pd, "disc type is not CD-RW\n"); 2004 + dev_notice(ddev, "disc type is not CD-RW\n"); 2007 2005 return 1; 2008 2006 } 2009 2007 if (!(buf[6] & 0x4)) { 2010 - pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n"); 2008 + dev_notice(ddev, "A1 values on media 
are not valid, maybe not CDRW?\n"); 2011 2009 return 1; 2012 2010 } 2013 2011 ··· 2027 2025 *speed = us_clv_to_speed[sp]; 2028 2026 break; 2029 2027 default: 2030 - pkt_notice(pd, "unknown disc sub-type %d\n", st); 2028 + dev_notice(ddev, "unknown disc sub-type %d\n", st); 2031 2029 return 1; 2032 2030 } 2033 2031 if (*speed) { 2034 - pkt_info(pd, "maximum media speed: %d\n", *speed); 2032 + dev_info(ddev, "maximum media speed: %d\n", *speed); 2035 2033 return 0; 2036 2034 } else { 2037 - pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st); 2035 + dev_notice(ddev, "unknown speed %d for sub-type %d\n", sp, st); 2038 2036 return 1; 2039 2037 } 2040 2038 } 2041 2039 2042 2040 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) 2043 2041 { 2042 + struct device *ddev = disk_to_dev(pd->disk); 2044 2043 struct packet_command cgc; 2045 2044 struct scsi_sense_hdr sshdr; 2046 2045 int ret; 2047 2046 2048 - pkt_dbg(2, pd, "Performing OPC\n"); 2047 + dev_dbg(ddev, "Performing OPC\n"); 2049 2048 2050 2049 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); 2051 2050 cgc.sshdr = &sshdr; ··· 2061 2058 2062 2059 static int pkt_open_write(struct pktcdvd_device *pd) 2063 2060 { 2061 + struct device *ddev = disk_to_dev(pd->disk); 2064 2062 int ret; 2065 2063 unsigned int write_speed, media_write_speed, read_speed; 2066 2064 2067 2065 ret = pkt_probe_settings(pd); 2068 2066 if (ret) { 2069 - pkt_dbg(2, pd, "failed probe\n"); 2067 + dev_dbg(ddev, "failed probe\n"); 2070 2068 return ret; 2071 2069 } 2072 2070 2073 2071 ret = pkt_set_write_settings(pd); 2074 2072 if (ret) { 2075 - pkt_dbg(1, pd, "failed saving write settings\n"); 2073 + dev_notice(ddev, "failed saving write settings\n"); 2076 2074 return -EIO; 2077 2075 } 2078 2076 ··· 2086 2082 case 0x13: /* DVD-RW */ 2087 2083 case 0x1a: /* DVD+RW */ 2088 2084 case 0x12: /* DVD-RAM */ 2089 - pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed); 2085 + dev_notice(ddev, "write speed %ukB/s\n", write_speed); 
2090 2086 break; 2091 2087 default: 2092 2088 ret = pkt_media_speed(pd, &media_write_speed); 2093 2089 if (ret) 2094 2090 media_write_speed = 16; 2095 2091 write_speed = min(write_speed, media_write_speed * 177); 2096 - pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176); 2092 + dev_notice(ddev, "write speed %ux\n", write_speed / 176); 2097 2093 break; 2098 2094 } 2099 2095 read_speed = write_speed; 2100 2096 2101 2097 ret = pkt_set_speed(pd, write_speed, read_speed); 2102 2098 if (ret) { 2103 - pkt_dbg(1, pd, "couldn't set write speed\n"); 2099 + dev_notice(ddev, "couldn't set write speed\n"); 2104 2100 return -EIO; 2105 2101 } 2106 2102 pd->write_speed = write_speed; 2107 2103 pd->read_speed = read_speed; 2108 2104 2109 2105 ret = pkt_perform_opc(pd); 2110 - if (ret) { 2111 - pkt_dbg(1, pd, "Optimum Power Calibration failed\n"); 2112 - } 2106 + if (ret) 2107 + dev_notice(ddev, "Optimum Power Calibration failed\n"); 2113 2108 2114 2109 return 0; 2115 2110 } ··· 2118 2115 */ 2119 2116 static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) 2120 2117 { 2118 + struct device *ddev = disk_to_dev(pd->disk); 2121 2119 int ret; 2122 2120 long lba; 2123 2121 struct request_queue *q; ··· 2138 2134 2139 2135 ret = pkt_get_last_written(pd, &lba); 2140 2136 if (ret) { 2141 - pkt_err(pd, "pkt_get_last_written failed\n"); 2137 + dev_err(ddev, "pkt_get_last_written failed\n"); 2142 2138 goto out_putdev; 2143 2139 } 2144 2140 ··· 2167 2163 2168 2164 if (write) { 2169 2165 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { 2170 - pkt_err(pd, "not enough memory for buffers\n"); 2166 + dev_err(ddev, "not enough memory for buffers\n"); 2171 2167 ret = -ENOMEM; 2172 2168 goto out_putdev; 2173 2169 } 2174 - pkt_info(pd, "%lukB available on disc\n", lba << 1); 2170 + dev_info(ddev, "%lukB available on disc\n", lba << 1); 2175 2171 } 2176 2172 2177 2173 return 0; ··· 2188 2184 */ 2189 2185 static void pkt_release_dev(struct pktcdvd_device *pd, int flush) 2190 2186 { 
2187 + struct device *ddev = disk_to_dev(pd->disk); 2188 + 2191 2189 if (flush && pkt_flush_cache(pd)) 2192 - pkt_dbg(1, pd, "not flushing cache\n"); 2190 + dev_notice(ddev, "not flushing cache\n"); 2193 2191 2194 2192 pkt_lock_door(pd, 0); 2195 2193 ··· 2392 2386 static void pkt_submit_bio(struct bio *bio) 2393 2387 { 2394 2388 struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata; 2389 + struct device *ddev = disk_to_dev(pd->disk); 2395 2390 struct bio *split; 2396 2391 2397 2392 bio = bio_split_to_limits(bio); 2398 2393 if (!bio) 2399 2394 return; 2400 2395 2401 - pkt_dbg(2, pd, "start = %6llx stop = %6llx\n", 2396 + dev_dbg(ddev, "start = %6llx stop = %6llx\n", 2402 2397 (unsigned long long)bio->bi_iter.bi_sector, 2403 2398 (unsigned long long)bio_end_sector(bio)); 2404 2399 ··· 2412 2405 } 2413 2406 2414 2407 if (!test_bit(PACKET_WRITABLE, &pd->flags)) { 2415 - pkt_notice(pd, "WRITE for ro device (%llu)\n", 2408 + dev_notice(ddev, "WRITE for ro device (%llu)\n", 2416 2409 (unsigned long long)bio->bi_iter.bi_sector); 2417 2410 goto end_io; 2418 2411 } 2419 2412 2420 2413 if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) { 2421 - pkt_err(pd, "wrong bio size\n"); 2414 + dev_err(ddev, "wrong bio size\n"); 2422 2415 goto end_io; 2423 2416 } 2424 2417 ··· 2460 2453 char *msg; 2461 2454 int states[PACKET_NUM_STATES]; 2462 2455 2463 - seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev); 2456 + seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name, pd->bdev); 2464 2457 2465 2458 seq_printf(m, "\nSettings:\n"); 2466 2459 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); ··· 2516 2509 2517 2510 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) 2518 2511 { 2512 + struct device *ddev = disk_to_dev(pd->disk); 2519 2513 int i; 2520 2514 struct block_device *bdev; 2521 2515 struct scsi_device *sdev; 2522 2516 2523 2517 if (pd->pkt_dev == dev) { 2524 - pkt_err(pd, "recursive setup not allowed\n"); 
2518 + dev_err(ddev, "recursive setup not allowed\n"); 2525 2519 return -EBUSY; 2526 2520 } 2527 2521 for (i = 0; i < MAX_WRITERS; i++) { ··· 2530 2522 if (!pd2) 2531 2523 continue; 2532 2524 if (pd2->bdev->bd_dev == dev) { 2533 - pkt_err(pd, "%pg already setup\n", pd2->bdev); 2525 + dev_err(ddev, "%pg already setup\n", pd2->bdev); 2534 2526 return -EBUSY; 2535 2527 } 2536 2528 if (pd2->pkt_dev == dev) { 2537 - pkt_err(pd, "can't chain pktcdvd devices\n"); 2529 + dev_err(ddev, "can't chain pktcdvd devices\n"); 2538 2530 return -EBUSY; 2539 2531 } 2540 2532 } ··· 2558 2550 pkt_init_queue(pd); 2559 2551 2560 2552 atomic_set(&pd->cdrw.pending_bios, 0); 2561 - pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name); 2553 + pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name); 2562 2554 if (IS_ERR(pd->cdrw.thread)) { 2563 - pkt_err(pd, "can't start kernel thread\n"); 2555 + dev_err(ddev, "can't start kernel thread\n"); 2564 2556 goto out_mem; 2565 2557 } 2566 2558 2567 - proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd); 2568 - pkt_dbg(1, pd, "writer mapped to %pg\n", bdev); 2559 + proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd); 2560 + dev_notice(ddev, "writer mapped to %pg\n", bdev); 2569 2561 return 0; 2570 2562 2571 2563 out_mem: ··· 2578 2570 static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) 2579 2571 { 2580 2572 struct pktcdvd_device *pd = bdev->bd_disk->private_data; 2573 + struct device *ddev = disk_to_dev(pd->disk); 2581 2574 int ret; 2582 2575 2583 - pkt_dbg(2, pd, "cmd %x, dev %d:%d\n", 2584 - cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); 2576 + dev_dbg(ddev, "cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); 2585 2577 2586 2578 mutex_lock(&pktcdvd_mutex); 2587 2579 switch (cmd) { ··· 2607 2599 ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); 2608 2600 break; 2609 2601 default: 2610 - pkt_dbg(2, pd, "Unknown ioctl (%x)\n", 
cmd); 2602 + dev_dbg(ddev, "Unknown ioctl (%x)\n", cmd); 2611 2603 ret = -ENOTTY; 2612 2604 } 2613 2605 mutex_unlock(&pktcdvd_mutex); ··· 2685 2677 spin_lock_init(&pd->iosched.lock); 2686 2678 bio_list_init(&pd->iosched.read_queue); 2687 2679 bio_list_init(&pd->iosched.write_queue); 2688 - sprintf(pd->name, DRIVER_NAME"%d", idx); 2689 2680 init_waitqueue_head(&pd->wqueue); 2690 2681 pd->bio_queue = RB_ROOT; 2691 2682 ··· 2701 2694 disk->minors = 1; 2702 2695 disk->fops = &pktcdvd_ops; 2703 2696 disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART; 2704 - strcpy(disk->disk_name, pd->name); 2697 + snprintf(disk->disk_name, sizeof(disk->disk_name), DRIVER_NAME"%d", idx); 2705 2698 disk->private_data = pd; 2706 2699 2707 2700 pd->pkt_dev = MKDEV(pktdev_major, idx); ··· 2743 2736 static int pkt_remove_dev(dev_t pkt_dev) 2744 2737 { 2745 2738 struct pktcdvd_device *pd; 2739 + struct device *ddev; 2746 2740 int idx; 2747 2741 int ret = 0; 2748 2742 ··· 2764 2756 ret = -EBUSY; 2765 2757 goto out; 2766 2758 } 2759 + 2760 + ddev = disk_to_dev(pd->disk); 2761 + 2767 2762 if (!IS_ERR(pd->cdrw.thread)) 2768 2763 kthread_stop(pd->cdrw.thread); 2769 2764 ··· 2777 2766 2778 2767 blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY); 2779 2768 2780 - remove_proc_entry(pd->name, pkt_proc); 2781 - pkt_dbg(1, pd, "writer unmapped\n"); 2769 + remove_proc_entry(pd->disk->disk_name, pkt_proc); 2770 + dev_notice(ddev, "writer unmapped\n"); 2782 2771 2783 2772 del_gendisk(pd->disk); 2784 2773 put_disk(pd->disk);
-1
include/linux/pktcdvd.h
··· 156 156 { 157 157 struct block_device *bdev; /* dev attached */ 158 158 dev_t pkt_dev; /* our dev */ 159 - char name[20]; 160 159 struct packet_settings settings; 161 160 struct packet_stats stats; 162 161 int refcnt; /* Open count */
+1
include/uapi/linux/pktcdvd.h
··· 16 16 #include <linux/types.h> 17 17 18 18 /* 19 + * UNUSED: 19 20 * 1 for normal debug messages, 2 is very verbose. 0 to turn it off. 20 21 */ 21 22 #define PACKET_DEBUG 1