Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Drivers: hv: Cosmetic changes for hv.c and balloon.c

Fix issues reported by checkpatch.pl script in hv.c and
balloon.c
- Remove unnecessary parentheses
- Remove extra newlines
- Remove extra spaces
- Add spaces between comparison operators
- Remove comparison with NULL in if statements

No functional changes intended

Signed-off-by: Aditya Nagesh <adityanagesh@linux.microsoft.com>
Reviewed-by: Saurabh Sengar <ssengar@linux.microsoft.com>
Reviewed-by: Michael Kelley <mhklinux@outlook.com>
Link: https://lore.kernel.org/r/1717152521-6439-1-git-send-email-adityanagesh@linux.microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
Message-ID: <1717152521-6439-1-git-send-email-adityanagesh@linux.microsoft.com>

Authored by Aditya Nagesh and committed by Wei Liu
831bcbce a0b13403

+53 -82
+18 -19
drivers/hv/hv.c
··· 45 45 * This involves a hypercall. 46 46 */ 47 47 int hv_post_message(union hv_connection_id connection_id, 48 - enum hv_message_type message_type, 49 - void *payload, size_t payload_size) 48 + enum hv_message_type message_type, 49 + void *payload, size_t payload_size) 50 50 { 51 51 struct hv_input_post_message *aligned_msg; 52 52 unsigned long flags; ··· 86 86 status = HV_STATUS_INVALID_PARAMETER; 87 87 } else { 88 88 status = hv_do_hypercall(HVCALL_POST_MESSAGE, 89 - aligned_msg, NULL); 89 + aligned_msg, NULL); 90 90 } 91 91 92 92 local_irq_restore(flags); ··· 111 111 112 112 hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask), 113 113 GFP_KERNEL); 114 - if (hv_context.hv_numa_map == NULL) { 114 + if (!hv_context.hv_numa_map) { 115 115 pr_err("Unable to allocate NUMA map\n"); 116 116 goto err; 117 117 } ··· 120 120 hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); 121 121 122 122 tasklet_init(&hv_cpu->msg_dpc, 123 - vmbus_on_msg_dpc, (unsigned long) hv_cpu); 123 + vmbus_on_msg_dpc, (unsigned long)hv_cpu); 124 124 125 125 if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) { 126 126 hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC); 127 - if (hv_cpu->post_msg_page == NULL) { 127 + if (!hv_cpu->post_msg_page) { 128 128 pr_err("Unable to allocate post msg page\n"); 129 129 goto err; 130 130 } ··· 147 147 if (!ms_hyperv.paravisor_present && !hv_root_partition) { 148 148 hv_cpu->synic_message_page = 149 149 (void *)get_zeroed_page(GFP_ATOMIC); 150 - if (hv_cpu->synic_message_page == NULL) { 150 + if (!hv_cpu->synic_message_page) { 151 151 pr_err("Unable to allocate SYNIC message page\n"); 152 152 goto err; 153 153 } 154 154 155 155 hv_cpu->synic_event_page = 156 156 (void *)get_zeroed_page(GFP_ATOMIC); 157 - if (hv_cpu->synic_event_page == NULL) { 157 + if (!hv_cpu->synic_event_page) { 158 158 pr_err("Unable to allocate SYNIC event page\n"); 159 159 160 160 free_page((unsigned long)hv_cpu->synic_message_page); ··· 203 203 return 
ret; 204 204 } 205 205 206 - 207 206 void hv_synic_free(void) 208 207 { 209 208 int cpu, ret; 210 209 211 210 for_each_present_cpu(cpu) { 212 - struct hv_per_cpu_context *hv_cpu 213 - = per_cpu_ptr(hv_context.cpu_context, cpu); 211 + struct hv_per_cpu_context *hv_cpu = 212 + per_cpu_ptr(hv_context.cpu_context, cpu); 214 213 215 214 /* It's better to leak the page if the encryption fails. */ 216 215 if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) { ··· 261 262 */ 262 263 void hv_synic_enable_regs(unsigned int cpu) 263 264 { 264 - struct hv_per_cpu_context *hv_cpu 265 - = per_cpu_ptr(hv_context.cpu_context, cpu); 265 + struct hv_per_cpu_context *hv_cpu = 266 + per_cpu_ptr(hv_context.cpu_context, cpu); 266 267 union hv_synic_simp simp; 267 268 union hv_synic_siefp siefp; 268 269 union hv_synic_sint shared_sint; ··· 276 277 /* Mask out vTOM bit. ioremap_cache() maps decrypted */ 277 278 u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) & 278 279 ~ms_hyperv.shared_gpa_boundary; 279 - hv_cpu->synic_message_page 280 - = (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE); 280 + hv_cpu->synic_message_page = 281 + (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE); 281 282 if (!hv_cpu->synic_message_page) 282 283 pr_err("Fail to map synic message page.\n"); 283 284 } else { ··· 295 296 /* Mask out vTOM bit. 
ioremap_cache() maps decrypted */ 296 297 u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) & 297 298 ~ms_hyperv.shared_gpa_boundary; 298 - hv_cpu->synic_event_page 299 - = (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE); 299 + hv_cpu->synic_event_page = 300 + (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE); 300 301 if (!hv_cpu->synic_event_page) 301 302 pr_err("Fail to map synic event page.\n"); 302 303 } else { ··· 347 348 */ 348 349 void hv_synic_disable_regs(unsigned int cpu) 349 350 { 350 - struct hv_per_cpu_context *hv_cpu 351 - = per_cpu_ptr(hv_context.cpu_context, cpu); 351 + struct hv_per_cpu_context *hv_cpu = 352 + per_cpu_ptr(hv_context.cpu_context, cpu); 352 353 union hv_synic_sint shared_sint; 353 354 union hv_synic_simp simp; 354 355 union hv_synic_siefp siefp;
+35 -63
drivers/hv/hv_balloon.c
··· 42 42 * Begin protocol definitions. 43 43 */ 44 44 45 - 46 - 47 45 /* 48 46 * Protocol versions. The low word is the minor version, the high word the major 49 47 * version. ··· 69 71 70 72 DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10 71 73 }; 72 - 73 - 74 74 75 75 /* 76 76 * Message Types ··· 98 102 DM_VERSION_1_MAX = 12 99 103 }; 100 104 101 - 102 105 /* 103 106 * Structures defining the dynamic memory management 104 107 * protocol. ··· 110 115 }; 111 116 __u32 version; 112 117 } __packed; 113 - 114 118 115 119 union dm_caps { 116 120 struct { ··· 143 149 __u64 page_range; 144 150 } __packed; 145 151 146 - 147 - 148 152 /* 149 153 * The header for all dynamic memory messages: 150 154 * ··· 166 174 struct dm_header hdr; 167 175 __u8 data[]; /* enclosed message */ 168 176 } __packed; 169 - 170 177 171 178 /* 172 179 * Specific message types supporting the dynamic memory protocol. ··· 263 272 __u32 io_diff; 264 273 } __packed; 265 274 266 - 267 275 /* 268 276 * Message to ask the guest to allocate memory - balloon up message. 269 277 * This message is sent from the host to the guest. The guest may not be ··· 277 287 __u32 reservedz; 278 288 } __packed; 279 289 280 - 281 290 /* 282 291 * Balloon response message; this message is sent from the guest 283 292 * to the host in response to the balloon message. 284 293 * 285 294 * reservedz: Reserved; must be set to zero. 286 295 * more_pages: If FALSE, this is the last message of the transaction. 287 - * if TRUE there will atleast one more message from the guest. 296 + * if TRUE there will be at least one more message from the guest. 288 297 * 289 298 * range_count: The number of ranges in the range array. 290 299 * ··· 304 315 * to the guest to give guest more memory. 305 316 * 306 317 * more_pages: If FALSE, this is the last message of the transaction. 307 - * if TRUE there will atleast one more message from the guest. 318 + * if TRUE there will be at least one more message from the guest. 
308 319 * 309 320 * reservedz: Reserved; must be set to zero. 310 321 * ··· 331 342 struct dm_unballoon_response { 332 343 struct dm_header hdr; 333 344 } __packed; 334 - 335 345 336 346 /* 337 347 * Hot add request message. Message sent from the host to the guest. ··· 378 390 INFO_TYPE_MAX_PAGE_CNT = 0, 379 391 MAX_INFO_TYPE 380 392 }; 381 - 382 393 383 394 /* 384 395 * Header for the information message. ··· 468 481 469 482 static int hv_hypercall_multi_failure; 470 483 471 - module_param(hot_add, bool, (S_IRUGO | S_IWUSR)); 484 + module_param(hot_add, bool, 0644); 472 485 MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add"); 473 486 474 - module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR)); 487 + module_param(pressure_report_delay, uint, 0644); 475 488 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure"); 476 489 static atomic_t trans_id = ATOMIC_INIT(0); 477 490 ··· 489 502 DM_HOT_ADD, 490 503 DM_INIT_ERROR 491 504 }; 492 - 493 505 494 506 static __u8 recv_buffer[HV_HYP_PAGE_SIZE]; 495 507 static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE]; ··· 585 599 struct hv_hotadd_gap *gap; 586 600 587 601 /* The page is not backed. */ 588 - if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn)) 602 + if (pfn < has->covered_start_pfn || pfn >= has->covered_end_pfn) 589 603 return false; 590 604 591 605 /* Check for gaps. */ 592 606 list_for_each_entry(gap, &has->gap_list, list) { 593 - if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn)) 607 + if (pfn >= gap->start_pfn && pfn < gap->end_pfn) 594 608 return false; 595 609 } 596 610 ··· 770 784 guard(spinlock_irqsave)(&dm_device.ha_lock); 771 785 list_for_each_entry(has, &dm_device.ha_region_list, list) { 772 786 /* The page belongs to a different HAS. 
*/ 773 - if ((pfn < has->start_pfn) || 774 - (pfn + (1UL << order) > has->end_pfn)) 787 + if (pfn < has->start_pfn || 788 + (pfn + (1UL << order) > has->end_pfn)) 775 789 continue; 776 790 777 791 hv_bring_pgs_online(has, pfn, 1UL << order); ··· 832 846 } 833 847 834 848 static unsigned long handle_pg_range(unsigned long pg_start, 835 - unsigned long pg_count) 849 + unsigned long pg_count) 836 850 { 837 851 unsigned long start_pfn = pg_start; 838 852 unsigned long pfn_cnt = pg_count; ··· 843 857 unsigned long res = 0, flags; 844 858 845 859 pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count, 846 - pg_start); 860 + pg_start); 847 861 848 862 spin_lock_irqsave(&dm_device.ha_lock, flags); 849 863 list_for_each_entry(has, &dm_device.ha_region_list, list) { ··· 879 893 if (start_pfn > has->start_pfn && 880 894 online_section_nr(pfn_to_section_nr(start_pfn))) 881 895 hv_bring_pgs_online(has, start_pfn, pgs_ol); 882 - 883 896 } 884 897 885 - if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) { 898 + if (has->ha_end_pfn < has->end_pfn && pfn_cnt > 0) { 886 899 /* 887 900 * We have some residual hot add range 888 901 * that needs to be hot added; hot add ··· 984 999 rg_start = dm->ha_wrk.ha_region_range.finfo.start_page; 985 1000 rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt; 986 1001 987 - if ((rg_start == 0) && (!dm->host_specified_ha_region)) { 1002 + if (rg_start == 0 && !dm->host_specified_ha_region) { 988 1003 /* 989 1004 * The host has not specified the hot-add region. 
990 1005 * Based on the hot-add page range being specified, ··· 998 1013 999 1014 if (do_hot_add) 1000 1015 resp.page_count = process_hot_add(pg_start, pfn_cnt, 1001 - rg_start, rg_sz); 1016 + rg_start, rg_sz); 1002 1017 1003 1018 dm->num_pages_added += resp.page_count; 1004 1019 #endif ··· 1176 1191 sizeof(struct dm_status), 1177 1192 (unsigned long)NULL, 1178 1193 VM_PKT_DATA_INBAND, 0); 1179 - 1180 1194 } 1181 1195 1182 1196 static void free_balloon_pages(struct hv_dynmem_device *dm, 1183 - union dm_mem_page_range *range_array) 1197 + union dm_mem_page_range *range_array) 1184 1198 { 1185 1199 int num_pages = range_array->finfo.page_cnt; 1186 1200 __u64 start_frame = range_array->finfo.start_page; ··· 1194 1210 adjust_managed_page_count(pg, 1); 1195 1211 } 1196 1212 } 1197 - 1198 - 1199 1213 1200 1214 static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm, 1201 1215 unsigned int num_pages, ··· 1240 1258 page_to_pfn(pg); 1241 1259 bl_resp->range_array[i].finfo.page_cnt = alloc_unit; 1242 1260 bl_resp->hdr.size += sizeof(union dm_mem_page_range); 1243 - 1244 1261 } 1245 1262 1246 1263 return i * alloc_unit; ··· 1293 1312 1294 1313 if (num_ballooned == 0 || num_ballooned == num_pages) { 1295 1314 pr_debug("Ballooned %u out of %u requested pages.\n", 1296 - num_pages, dm_device.balloon_wrk.num_pages); 1315 + num_pages, dm_device.balloon_wrk.num_pages); 1297 1316 1298 1317 bl_resp->more_pages = 0; 1299 1318 done = true; ··· 1327 1346 1328 1347 for (i = 0; i < bl_resp->range_count; i++) 1329 1348 free_balloon_pages(&dm_device, 1330 - &bl_resp->range_array[i]); 1349 + &bl_resp->range_array[i]); 1331 1350 1332 1351 done = true; 1333 1352 } 1334 1353 } 1335 - 1336 1354 } 1337 1355 1338 1356 static void balloon_down(struct hv_dynmem_device *dm, 1339 - struct dm_unballoon_request *req) 1357 + struct dm_unballoon_request *req) 1340 1358 { 1341 1359 union dm_mem_page_range *range_array = req->range_array; 1342 1360 int range_count = req->range_count; ··· 1349 
1369 } 1350 1370 1351 1371 pr_debug("Freed %u ballooned pages.\n", 1352 - prev_pages_ballooned - dm->num_pages_ballooned); 1372 + prev_pages_ballooned - dm->num_pages_ballooned); 1353 1373 1354 1374 if (req->more_pages == 1) 1355 1375 return; ··· 1374 1394 struct hv_dynmem_device *dm = dm_dev; 1375 1395 1376 1396 while (!kthread_should_stop()) { 1377 - wait_for_completion_interruptible_timeout( 1378 - &dm_device.config_event, 1*HZ); 1397 + wait_for_completion_interruptible_timeout(&dm_device.config_event, 1 * HZ); 1379 1398 /* 1380 1399 * The host expects us to post information on the memory 1381 1400 * pressure every second. ··· 1398 1419 return 0; 1399 1420 } 1400 1421 1401 - 1402 1422 static void version_resp(struct hv_dynmem_device *dm, 1403 - struct dm_version_response *vresp) 1423 + struct dm_version_response *vresp) 1404 1424 { 1405 1425 struct dm_version_request version_req; 1406 1426 int ret; ··· 1460 1482 } 1461 1483 1462 1484 static void cap_resp(struct hv_dynmem_device *dm, 1463 - struct dm_capabilities_resp_msg *cap_resp) 1485 + struct dm_capabilities_resp_msg *cap_resp) 1464 1486 { 1465 1487 if (!cap_resp->is_accepted) { 1466 1488 pr_err("Capabilities not accepted by host\n"); ··· 1493 1515 switch (dm_hdr->type) { 1494 1516 case DM_VERSION_RESPONSE: 1495 1517 version_resp(dm, 1496 - (struct dm_version_response *)dm_msg); 1518 + (struct dm_version_response *)dm_msg); 1497 1519 break; 1498 1520 1499 1521 case DM_CAPABILITIES_RESPONSE: ··· 1523 1545 1524 1546 dm->state = DM_BALLOON_DOWN; 1525 1547 balloon_down(dm, 1526 - (struct dm_unballoon_request *)recv_buffer); 1548 + (struct dm_unballoon_request *)recv_buffer); 1527 1549 break; 1528 1550 1529 1551 case DM_MEM_HOT_ADD_REQUEST: ··· 1561 1583 1562 1584 default: 1563 1585 pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type); 1564 - 1565 1586 } 1566 1587 } 1567 - 1568 1588 } 1569 1589 1570 1590 #define HV_LARGE_REPORTING_ORDER 9 1571 1591 #define HV_LARGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << 
\ 1572 1592 HV_LARGE_REPORTING_ORDER) 1573 1593 static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info, 1574 - struct scatterlist *sgl, unsigned int nents) 1594 + struct scatterlist *sgl, unsigned int nents) 1575 1595 { 1576 1596 unsigned long flags; 1577 1597 struct hv_memory_hint *hint; ··· 1604 1628 */ 1605 1629 1606 1630 /* page reporting for pages 2MB or higher */ 1607 - if (order >= HV_LARGE_REPORTING_ORDER ) { 1631 + if (order >= HV_LARGE_REPORTING_ORDER) { 1608 1632 range->page.largepage = 1; 1609 1633 range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB; 1610 1634 range->base_large_pfn = page_to_hvpfn( ··· 1618 1642 range->page.additional_pages = 1619 1643 (sg->length / HV_HYP_PAGE_SIZE) - 1; 1620 1644 } 1621 - 1622 1645 } 1623 1646 1624 1647 status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0, 1625 1648 hint, NULL); 1626 1649 local_irq_restore(flags); 1627 1650 if (!hv_result_success(status)) { 1628 - 1629 1651 pr_err("Cold memory discard hypercall failed with status %llx\n", 1630 - status); 1652 + status); 1631 1653 if (hv_hypercall_multi_failure > 0) 1632 1654 hv_hypercall_multi_failure++; 1633 1655 1634 1656 if (hv_result(status) == HV_STATUS_INVALID_PARAMETER) { 1635 1657 pr_err("Underlying Hyper-V does not support order less than 9. 
Hypercall failed\n"); 1636 1658 pr_err("Defaulting to page_reporting_order %d\n", 1637 - pageblock_order); 1659 + pageblock_order); 1638 1660 page_reporting_order = pageblock_order; 1639 1661 hv_hypercall_multi_failure++; 1640 1662 return -EINVAL; ··· 1666 1692 pr_err("Failed to enable cold memory discard: %d\n", ret); 1667 1693 } else { 1668 1694 pr_info("Cold memory discard hint enabled with order %d\n", 1669 - page_reporting_order); 1695 + page_reporting_order); 1670 1696 } 1671 1697 } 1672 1698 ··· 1749 1775 if (ret) 1750 1776 goto out; 1751 1777 1752 - t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); 1778 + t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ); 1753 1779 if (t == 0) { 1754 1780 ret = -ETIMEDOUT; 1755 1781 goto out; ··· 1807 1833 if (ret) 1808 1834 goto out; 1809 1835 1810 - t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); 1836 + t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ); 1811 1837 if (t == 0) { 1812 1838 ret = -ETIMEDOUT; 1813 1839 goto out; ··· 1848 1874 char *sname; 1849 1875 1850 1876 seq_printf(f, "%-22s: %u.%u\n", "host_version", 1851 - DYNMEM_MAJOR_VERSION(dm->version), 1852 - DYNMEM_MINOR_VERSION(dm->version)); 1877 + DYNMEM_MAJOR_VERSION(dm->version), 1878 + DYNMEM_MINOR_VERSION(dm->version)); 1853 1879 1854 1880 seq_printf(f, "%-22s:", "capabilities"); 1855 1881 if (ballooning_enabled()) ··· 1898 1924 seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned); 1899 1925 1900 1926 seq_printf(f, "%-22s: %lu\n", "total_pages_committed", 1901 - get_pages_committed(dm)); 1927 + get_pages_committed(dm)); 1902 1928 1903 1929 seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count", 1904 - dm->max_dynamic_page_count); 1930 + dm->max_dynamic_page_count); 1905 1931 1906 1932 return 0; 1907 1933 } ··· 1911 1937 static void hv_balloon_debugfs_init(struct hv_dynmem_device *b) 1912 1938 { 1913 1939 debugfs_create_file("hv-balloon", 0444, NULL, b, 1914 - &hv_balloon_debug_fops); 
1940 + &hv_balloon_debug_fops); 1915 1941 } 1916 1942 1917 1943 static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b) ··· 2069 2095 tasklet_enable(&hv_dev->channel->callback_event); 2070 2096 2071 2097 return 0; 2072 - 2073 2098 } 2074 2099 2075 2100 static int balloon_resume(struct hv_device *dev) ··· 2127 2154 2128 2155 static int __init init_balloon_drv(void) 2129 2156 { 2130 - 2131 2157 return vmbus_driver_register(&balloon_drv); 2132 2158 } 2133 2159