Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[IA64] SN specific version of dma_get_required_mask()

Create a platform specific version of dma_get_required_mask()
for ia64 SN Altix. All SN Altix platforms support 64 bit DMA
addressing regardless of the size of system memory.
Create an ia64 machvec for dma_get_required_mask, with the
SN version unconditionally returning DMA_64BIT_MASK.

Signed-off-by: John Keller <jpk@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>

Authored by John Keller and committed by Tony Luck
175add19 a6a3bb5c

+49 -5
+4 -5
Documentation/DMA-API.txt
··· 170 170 u64 171 171 dma_get_required_mask(struct device *dev) 172 172 173 - After setting the mask with dma_set_mask(), this API returns the 174 - actual mask (within that already set) that the platform actually 175 - requires to operate efficiently. Usually this means the returned mask 173 + This API returns the mask that the platform requires to 174 + operate efficiently. Usually this means the returned mask 176 175 is the minimum required to cover all of memory. Examining the 177 176 required mask gives drivers with variable descriptor sizes the 178 177 opportunity to use smaller descriptors as necessary. 179 178 180 179 Requesting the required mask does not alter the current mask. If you 181 - wish to take advantage of it, you should issue another dma_set_mask() 182 - call to lower the mask again. 180 + wish to take advantage of it, you should issue a dma_set_mask() 181 + call to set the mask to the value returned. 183 182 184 183 185 184 Part Id - Streaming DMA mappings
+2
arch/ia64/include/asm/dma-mapping.h
··· 9 9 #include <linux/scatterlist.h> 10 10 #include <asm/swiotlb.h> 11 11 12 + #define ARCH_HAS_DMA_GET_REQUIRED_MASK 13 + 12 14 struct dma_mapping_ops { 13 15 int (*mapping_error)(struct device *dev, 14 16 dma_addr_t dma_addr);
+7
arch/ia64/include/asm/machvec.h
··· 62 62 typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *); 63 63 typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *); 64 64 typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *); 65 + typedef u64 ia64_mv_dma_get_required_mask (struct device *); 65 66 66 67 /* 67 68 * WARNING: The legacy I/O space is _architected_. Platforms are ··· 160 159 # define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device 161 160 # define platform_dma_mapping_error ia64_mv.dma_mapping_error 162 161 # define platform_dma_supported ia64_mv.dma_supported 162 + # define platform_dma_get_required_mask ia64_mv.dma_get_required_mask 163 163 # define platform_irq_to_vector ia64_mv.irq_to_vector 164 164 # define platform_local_vector_to_irq ia64_mv.local_vector_to_irq 165 165 # define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem ··· 215 213 ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device; 216 214 ia64_mv_dma_mapping_error *dma_mapping_error; 217 215 ia64_mv_dma_supported *dma_supported; 216 + ia64_mv_dma_get_required_mask *dma_get_required_mask; 218 217 ia64_mv_irq_to_vector *irq_to_vector; 219 218 ia64_mv_local_vector_to_irq *local_vector_to_irq; 220 219 ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem; ··· 266 263 platform_dma_sync_sg_for_device, \ 267 264 platform_dma_mapping_error, \ 268 265 platform_dma_supported, \ 266 + platform_dma_get_required_mask, \ 269 267 platform_irq_to_vector, \ 270 268 platform_local_vector_to_irq, \ 271 269 platform_pci_get_legacy_mem, \ ··· 369 365 #endif 370 366 #ifndef platform_dma_supported 371 367 # define platform_dma_supported swiotlb_dma_supported 368 + #endif 369 + #ifndef platform_dma_get_required_mask 370 + # define platform_dma_get_required_mask ia64_dma_get_required_mask 372 371 #endif 373 372 #ifndef platform_irq_to_vector 374 373 # define 
platform_irq_to_vector __ia64_irq_to_vector
+1
arch/ia64/include/asm/machvec_init.h
··· 3 3 4 4 extern ia64_mv_send_ipi_t ia64_send_ipi; 5 5 extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge; 6 + extern ia64_mv_dma_get_required_mask ia64_dma_get_required_mask; 6 7 extern ia64_mv_irq_to_vector __ia64_irq_to_vector; 7 8 extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq; 8 9 extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
+2
arch/ia64/include/asm/machvec_sn2.h
··· 67 67 extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device; 68 68 extern ia64_mv_dma_mapping_error sn_dma_mapping_error; 69 69 extern ia64_mv_dma_supported sn_dma_supported; 70 + extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask; 70 71 extern ia64_mv_migrate_t sn_migrate; 71 72 extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event; 72 73 extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq; ··· 124 123 #define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device 125 124 #define platform_dma_mapping_error sn_dma_mapping_error 126 125 #define platform_dma_supported sn_dma_supported 126 + #define platform_dma_get_required_mask sn_dma_get_required_mask 127 127 #define platform_migrate sn_migrate 128 128 #define platform_kernel_launch_event sn_kernel_launch_event 129 129 #ifdef CONFIG_PCI_MSI
+27
arch/ia64/pci/pci.c
··· 19 19 #include <linux/ioport.h> 20 20 #include <linux/slab.h> 21 21 #include <linux/spinlock.h> 22 + #include <linux/bootmem.h> 22 23 23 24 #include <asm/machvec.h> 24 25 #include <asm/page.h> ··· 748 747 } 749 748 pci_cache_line_size = (1 << cci.pcci_line_size) / 4; 750 749 } 750 + 751 + u64 ia64_dma_get_required_mask(struct device *dev) 752 + { 753 + u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT); 754 + u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT)); 755 + u64 mask; 756 + 757 + if (!high_totalram) { 758 + /* convert to mask just covering totalram */ 759 + low_totalram = (1 << (fls(low_totalram) - 1)); 760 + low_totalram += low_totalram - 1; 761 + mask = low_totalram; 762 + } else { 763 + high_totalram = (1 << (fls(high_totalram) - 1)); 764 + high_totalram += high_totalram - 1; 765 + mask = (((u64)high_totalram) << 32) + 0xffffffff; 766 + } 767 + return mask; 768 + } 769 + EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask); 770 + 771 + u64 dma_get_required_mask(struct device *dev) 772 + { 773 + return platform_dma_get_required_mask(dev); 774 + } 775 + EXPORT_SYMBOL_GPL(dma_get_required_mask); 751 776 752 777 static int __init pcibios_init(void) 753 778 {
+6
arch/ia64/sn/pci/pci_dma.c
··· 356 356 } 357 357 EXPORT_SYMBOL(sn_dma_mapping_error); 358 358 359 + u64 sn_dma_get_required_mask(struct device *dev) 360 + { 361 + return DMA_64BIT_MASK; 362 + } 363 + EXPORT_SYMBOL_GPL(sn_dma_get_required_mask); 364 + 359 365 char *sn_pci_get_legacy_mem(struct pci_bus *bus) 360 366 { 361 367 if (!SN_PCIBUS_BUSSOFT(bus))