Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

usb: xhci: Make some static functions global

This patch makes some static functions global to avoid duplications
in different files. These functions can be used in the implementation
of xHCI debug capability. There is no functional change.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Lu Baolu and committed by
Greg Kroah-Hartman
67d2ea9f 103afda0

+72 -42
+55 -39
drivers/usb/host/xhci-mem.c
··· 357 357 * Set the end flag and the cycle toggle bit on the last segment. 358 358 * See section 4.9.1 and figures 15 and 16. 359 359 */ 360 - static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, 360 + struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, 361 361 unsigned int num_segs, unsigned int cycle_state, 362 362 enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) 363 363 { ··· 454 454 return 0; 455 455 } 456 456 457 - static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 457 + struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 458 458 int type, gfp_t flags) 459 459 { 460 460 struct xhci_container_ctx *ctx; ··· 479 479 return ctx; 480 480 } 481 481 482 - static void xhci_free_container_ctx(struct xhci_hcd *xhci, 482 + void xhci_free_container_ctx(struct xhci_hcd *xhci, 483 483 struct xhci_container_ctx *ctx) 484 484 { 485 485 if (!ctx) ··· 1757 1757 kfree(command); 1758 1758 } 1759 1759 1760 + int xhci_alloc_erst(struct xhci_hcd *xhci, 1761 + struct xhci_ring *evt_ring, 1762 + struct xhci_erst *erst, 1763 + gfp_t flags) 1764 + { 1765 + size_t size; 1766 + unsigned int val; 1767 + struct xhci_segment *seg; 1768 + struct xhci_erst_entry *entry; 1769 + 1770 + size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs; 1771 + erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev, 1772 + size, 1773 + &erst->erst_dma_addr, 1774 + flags); 1775 + if (!erst->entries) 1776 + return -ENOMEM; 1777 + 1778 + memset(erst->entries, 0, size); 1779 + erst->num_entries = evt_ring->num_segs; 1780 + 1781 + seg = evt_ring->first_seg; 1782 + for (val = 0; val < evt_ring->num_segs; val++) { 1783 + entry = &erst->entries[val]; 1784 + entry->seg_addr = cpu_to_le64(seg->dma); 1785 + entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT); 1786 + entry->rsvd = 0; 1787 + seg = seg->next; 1788 + } 1789 + 1790 + return 0; 1791 + } 1792 + 1793 + void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) 1794 + { 1795 + size_t size; 1796 + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 1797 + 1798 + size = sizeof(struct xhci_erst_entry) * (erst->num_entries); 1799 + if (erst->entries) 1800 + dma_free_coherent(dev, size, 1801 + erst->entries, 1802 + erst->erst_dma_addr); 1803 + erst->entries = NULL; 1804 + } 1805 + 1760 1806 void xhci_mem_cleanup(struct xhci_hcd *xhci) 1761 1807 { 1762 1808 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 1763 - int size; 1764 1809 int i, j, num_ports; 1765 1810 1766 1811 cancel_delayed_work_sync(&xhci->cmd_timer); 1767 1812 1768 - /* Free the Event Ring Segment Table and the actual Event Ring */ 1769 - size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 1770 - if (xhci->erst.entries) 1771 - dma_free_coherent(dev, size, 1772 - xhci->erst.entries, xhci->erst.erst_dma_addr); 1773 - xhci->erst.entries = NULL; 1774 - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST"); 1813 + xhci_free_erst(xhci, &xhci->erst); 1814 + 1775 1815 if (xhci->event_ring) 1776 1816 xhci_ring_free(xhci, xhci->event_ring); 1777 1817 xhci->event_ring = NULL; ··· 2348 2308 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 2349 2309 unsigned int val, val2; 2350 2310 u64 val_64; 2351 - struct xhci_segment *seg; 2352 - u32 page_size, temp; 2353 - int i; 2311 + u32 page_size, temp; 2312 + int i, ret; 2354 2313 2355 2314 INIT_LIST_HEAD(&xhci->cmd_list); 2356 2315 ··· 2488 2449 if (xhci_check_trb_in_td_math(xhci) < 0) 2489 2450 goto fail; 2490 2451 2491 - xhci->erst.entries = dma_alloc_coherent(dev, 2492 - sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma, 2493 - flags); 2494 - if (!xhci->erst.entries) 2452 + ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags); 2453 + if (ret) 2495 2454 goto fail; 2496 - xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2497 - "// Allocated event ring segment table at 0x%llx", 2498 - (unsigned long long)dma); 2499 - 2500 - memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS); 2501 - xhci->erst.num_entries = ERST_NUM_SEGS; 2502 - xhci->erst.erst_dma_addr = dma; 2503 - xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2504 - "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx", 2505 - xhci->erst.num_entries, 2506 - xhci->erst.entries, 2507 - (unsigned long long)xhci->erst.erst_dma_addr); 2508 - 2509 - /* set ring base address and size for each segment table entry */ 2510 - for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { 2511 - struct xhci_erst_entry *entry = &xhci->erst.entries[val]; 2512 - entry->seg_addr = cpu_to_le64(seg->dma); 2513 - entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT); 2514 - entry->rsvd = 0; 2515 - seg = seg->next; 2516 - } 2517 2455 2518 2456 /* set ERST count with the number of entries in the segment table */ 2519 2457 val = readl(&xhci->ir_set->erst_size);
+2 -2
drivers/usb/host/xhci-ring.c
··· 153 153 * See Cycle bit rules. SW is the consumer for the event ring only. 154 154 * Don't make a ring full of link TRBs. That would be dumb and this would loop. 155 155 */ 156 - static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) 156 + void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) 157 157 { 158 158 /* event ring doesn't have link trbs, check for last trb */ 159 159 if (ring->type == TYPE_EVENT) { ··· 2957 2957 return 0; 2958 2958 } 2959 2959 2960 - static unsigned int count_trbs(u64 addr, u64 len) 2960 + unsigned int count_trbs(u64 addr, u64 len) 2961 2961 { 2962 2962 unsigned int num_trbs; 2963 2963
+15 -1
drivers/usb/host/xhci.h
··· 1965 1965 int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, 1966 1966 struct usb_device *udev, struct usb_host_endpoint *ep, 1967 1967 gfp_t mem_flags); 1968 + struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, 1969 + unsigned int num_segs, unsigned int cycle_state, 1970 + enum xhci_ring_type type, unsigned int max_packet, gfp_t flags); 1968 1971 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring); 1969 1972 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, 1970 - unsigned int num_trbs, gfp_t flags); 1973 + unsigned int num_trbs, gfp_t flags); 1974 + int xhci_alloc_erst(struct xhci_hcd *xhci, 1975 + struct xhci_ring *evt_ring, 1976 + struct xhci_erst *erst, 1977 + gfp_t flags); 1978 + void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); 1971 1979 void xhci_free_endpoint_ring(struct xhci_hcd *xhci, 1972 1980 struct xhci_virt_device *virt_dev, 1973 1981 unsigned int ep_index); ··· 2006 1998 void xhci_urb_free_priv(struct urb_priv *urb_priv); 2007 1999 void xhci_free_command(struct xhci_hcd *xhci, 2008 2000 struct xhci_command *command); 2001 + struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 2002 + int type, gfp_t flags); 2003 + void xhci_free_container_ctx(struct xhci_hcd *xhci, 2004 + struct xhci_container_ctx *ctx); 2009 2005 2010 2006 /* xHCI host controller glue */ 2011 2007 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); ··· 2083 2071 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, 2084 2072 unsigned int ep_index, unsigned int stream_id); 2085 2073 void xhci_cleanup_command_queue(struct xhci_hcd *xhci); 2074 + void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring); 2075 + unsigned int count_trbs(u64 addr, u64 len); 2086 2076 2087 2077 /* xHCI roothub code */ 2088 2078 void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,