Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge 4.0-rc3 into tty-testing

This resolves a merge issue in drivers/tty/serial/8250/8250_pci.c

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+1422 -2132
-4
arch/x86/Kconfig.debug
··· 43 43 with klogd/syslogd or the X server. You should normally N here, 44 44 unless you want to debug such a crash. 45 45 46 - config EARLY_PRINTK_INTEL_MID 47 - bool "Early printk for Intel MID platform support" 48 - depends on EARLY_PRINTK && X86_INTEL_MID 49 - 50 46 config EARLY_PRINTK_DBGP 51 47 bool "Early printk via EHCI debug port" 52 48 depends on EARLY_PRINTK && PCI
-3
arch/x86/include/asm/intel-mid.h
··· 136 136 #define SFI_MTMR_MAX_NUM 8 137 137 #define SFI_MRTC_MAX 8 138 138 139 - extern struct console early_hsu_console; 140 - extern void hsu_early_console_init(const char *); 141 - 142 139 extern void intel_scu_devices_create(void); 143 140 extern void intel_scu_devices_destroy(void); 144 141
-6
arch/x86/kernel/early_printk.c
··· 375 375 if (!strncmp(buf, "xen", 3)) 376 376 early_console_register(&xenboot_console, keep); 377 377 #endif 378 - #ifdef CONFIG_EARLY_PRINTK_INTEL_MID 379 - if (!strncmp(buf, "hsu", 3)) { 380 - hsu_early_console_init(buf + 3); 381 - early_console_register(&early_hsu_console, keep); 382 - } 383 - #endif 384 378 #ifdef CONFIG_EARLY_PRINTK_EFI 385 379 if (!strncmp(buf, "efi", 3)) 386 380 early_console_register(&early_efi_console, keep);
-1
arch/x86/platform/intel-mid/Makefile
··· 1 1 obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfl.o 2 - obj-$(CONFIG_EARLY_PRINTK_INTEL_MID) += early_printk_intel_mid.o 3 2 4 3 # SFI specific code 5 4 ifdef CONFIG_X86_INTEL_MID
-112
arch/x86/platform/intel-mid/early_printk_intel_mid.c
··· 1 - /* 2 - * early_printk_intel_mid.c - early consoles for Intel MID platforms 3 - * 4 - * Copyright (c) 2008-2010, Intel Corporation 5 - * 6 - * This program is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU General Public License 8 - * as published by the Free Software Foundation; version 2 9 - * of the License. 10 - */ 11 - 12 - /* 13 - * This file implements early console named hsu. 14 - * hsu is based on a High Speed UART device which only exists in the Medfield 15 - * platform 16 - */ 17 - 18 - #include <linux/serial_reg.h> 19 - #include <linux/serial_mfd.h> 20 - #include <linux/console.h> 21 - #include <linux/kernel.h> 22 - #include <linux/delay.h> 23 - #include <linux/io.h> 24 - 25 - #include <asm/fixmap.h> 26 - #include <asm/pgtable.h> 27 - #include <asm/intel-mid.h> 28 - 29 - /* 30 - * Following is the early console based on Medfield HSU (High 31 - * Speed UART) device. 32 - */ 33 - #define HSU_PORT_BASE 0xffa28080 34 - 35 - static void __iomem *phsu; 36 - 37 - void hsu_early_console_init(const char *s) 38 - { 39 - unsigned long paddr, port = 0; 40 - u8 lcr; 41 - 42 - /* 43 - * Select the early HSU console port if specified by user in the 44 - * kernel command line. 
45 - */ 46 - if (*s && !kstrtoul(s, 10, &port)) 47 - port = clamp_val(port, 0, 2); 48 - 49 - paddr = HSU_PORT_BASE + port * 0x80; 50 - phsu = (void __iomem *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, paddr); 51 - 52 - /* Disable FIFO */ 53 - writeb(0x0, phsu + UART_FCR); 54 - 55 - /* Set to default 115200 bps, 8n1 */ 56 - lcr = readb(phsu + UART_LCR); 57 - writeb((0x80 | lcr), phsu + UART_LCR); 58 - writeb(0x18, phsu + UART_DLL); 59 - writeb(lcr, phsu + UART_LCR); 60 - writel(0x3600, phsu + UART_MUL*4); 61 - 62 - writeb(0x8, phsu + UART_MCR); 63 - writeb(0x7, phsu + UART_FCR); 64 - writeb(0x3, phsu + UART_LCR); 65 - 66 - /* Clear IRQ status */ 67 - readb(phsu + UART_LSR); 68 - readb(phsu + UART_RX); 69 - readb(phsu + UART_IIR); 70 - readb(phsu + UART_MSR); 71 - 72 - /* Enable FIFO */ 73 - writeb(0x7, phsu + UART_FCR); 74 - } 75 - 76 - #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) 77 - 78 - static void early_hsu_putc(char ch) 79 - { 80 - unsigned int timeout = 10000; /* 10ms */ 81 - u8 status; 82 - 83 - while (--timeout) { 84 - status = readb(phsu + UART_LSR); 85 - if (status & BOTH_EMPTY) 86 - break; 87 - udelay(1); 88 - } 89 - 90 - /* Only write the char when there was no timeout */ 91 - if (timeout) 92 - writeb(ch, phsu + UART_TX); 93 - } 94 - 95 - static void early_hsu_write(struct console *con, const char *str, unsigned n) 96 - { 97 - int i; 98 - 99 - for (i = 0; i < n && *str; i++) { 100 - if (*str == '\n') 101 - early_hsu_putc('\r'); 102 - early_hsu_putc(*str); 103 - str++; 104 - } 105 - } 106 - 107 - struct console early_hsu_console = { 108 - .name = "earlyhsu", 109 - .write = early_hsu_write, 110 - .flags = CON_PRINTBUFFER, 111 - .index = -1, 112 - };
+2
drivers/dma/Kconfig
··· 125 125 EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on 126 126 some Txxx and Bxxx parts. 127 127 128 + source "drivers/dma/hsu/Kconfig" 129 + 128 130 config MPC512X_DMA 129 131 tristate "Freescale MPC512x built-in DMA engine support" 130 132 depends on PPC_MPC512x || PPC_MPC831x
+1
drivers/dma/Makefile
··· 11 11 obj-$(CONFIG_INTEL_IOATDMA) += ioat/ 12 12 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o 13 13 obj-$(CONFIG_FSL_DMA) += fsldma.o 14 + obj-$(CONFIG_HSU_DMA) += hsu/ 14 15 obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o 15 16 obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ 16 17 obj-$(CONFIG_MV_XOR) += mv_xor.o
+14
drivers/dma/hsu/Kconfig
··· 1 + # DMA engine configuration for hsu 2 + config HSU_DMA 3 + tristate "High Speed UART DMA support" 4 + select DMA_ENGINE 5 + select DMA_VIRTUAL_CHANNELS 6 + 7 + config HSU_DMA_PCI 8 + tristate "High Speed UART DMA PCI driver" 9 + depends on PCI 10 + select HSU_DMA 11 + help 12 + Support the High Speed UART DMA on the platforms that 13 + enumerate it as a PCI device. For example, Intel Medfield 14 + has integrated this HSU DMA controller.
+5
drivers/dma/hsu/Makefile
··· 1 + obj-$(CONFIG_HSU_DMA) += hsu_dma.o 2 + hsu_dma-objs := hsu.o 3 + 4 + obj-$(CONFIG_HSU_DMA_PCI) += hsu_dma_pci.o 5 + hsu_dma_pci-objs := pci.o
+504
drivers/dma/hsu/hsu.c
··· 1 + /* 2 + * Core driver for the High Speed UART DMA 3 + * 4 + * Copyright (C) 2015 Intel Corporation 5 + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 6 + * 7 + * Partially based on the bits found in drivers/tty/serial/mfd.c. 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + /* 15 + * DMA channel allocation: 16 + * 1. Even number chans are used for DMA Read (UART TX), odd chans for DMA 17 + * Write (UART RX). 18 + * 2. 0/1 channel are assigned to port 0, 2/3 chan to port 1, 4/5 chan to 19 + * port 3, and so on. 20 + */ 21 + 22 + #include <linux/delay.h> 23 + #include <linux/dmaengine.h> 24 + #include <linux/dma-mapping.h> 25 + #include <linux/init.h> 26 + #include <linux/module.h> 27 + #include <linux/slab.h> 28 + 29 + #include "hsu.h" 30 + 31 + #define HSU_DMA_BUSWIDTHS \ 32 + BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ 33 + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 34 + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 35 + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ 36 + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 37 + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | \ 38 + BIT(DMA_SLAVE_BUSWIDTH_16_BYTES) 39 + 40 + static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc) 41 + { 42 + hsu_chan_writel(hsuc, HSU_CH_CR, 0); 43 + } 44 + 45 + static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc) 46 + { 47 + u32 cr = HSU_CH_CR_CHA; 48 + 49 + if (hsuc->direction == DMA_MEM_TO_DEV) 50 + cr &= ~HSU_CH_CR_CHD; 51 + else if (hsuc->direction == DMA_DEV_TO_MEM) 52 + cr |= HSU_CH_CR_CHD; 53 + 54 + hsu_chan_writel(hsuc, HSU_CH_CR, cr); 55 + } 56 + 57 + static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc) 58 + { 59 + struct dma_slave_config *config = &hsuc->config; 60 + struct hsu_dma_desc *desc = hsuc->desc; 61 + u32 bsr, mtsr; 62 + u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI; 63 + unsigned int i, count; 64 + 65 + if 
(hsuc->direction == DMA_MEM_TO_DEV) { 66 + bsr = config->dst_maxburst; 67 + mtsr = config->dst_addr_width; 68 + } else if (hsuc->direction == DMA_DEV_TO_MEM) { 69 + bsr = config->src_maxburst; 70 + mtsr = config->src_addr_width; 71 + } else { 72 + /* Not supported direction */ 73 + return; 74 + } 75 + 76 + hsu_chan_disable(hsuc); 77 + 78 + hsu_chan_writel(hsuc, HSU_CH_DCR, 0); 79 + hsu_chan_writel(hsuc, HSU_CH_BSR, bsr); 80 + hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr); 81 + 82 + /* Set descriptors */ 83 + count = (desc->nents - desc->active) % HSU_DMA_CHAN_NR_DESC; 84 + for (i = 0; i < count; i++) { 85 + hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr); 86 + hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len); 87 + 88 + /* Prepare value for DCR */ 89 + dcr |= HSU_CH_DCR_DESCA(i); 90 + dcr |= HSU_CH_DCR_CHTOI(i); /* timeout bit, see HSU Errata 1 */ 91 + 92 + desc->active++; 93 + } 94 + /* Only for the last descriptor in the chain */ 95 + dcr |= HSU_CH_DCR_CHSOD(count - 1); 96 + dcr |= HSU_CH_DCR_CHDI(count - 1); 97 + 98 + hsu_chan_writel(hsuc, HSU_CH_DCR, dcr); 99 + 100 + hsu_chan_enable(hsuc); 101 + } 102 + 103 + static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc) 104 + { 105 + unsigned long flags; 106 + 107 + spin_lock_irqsave(&hsuc->lock, flags); 108 + hsu_chan_disable(hsuc); 109 + hsu_chan_writel(hsuc, HSU_CH_DCR, 0); 110 + spin_unlock_irqrestore(&hsuc->lock, flags); 111 + } 112 + 113 + static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc) 114 + { 115 + unsigned long flags; 116 + 117 + spin_lock_irqsave(&hsuc->lock, flags); 118 + hsu_dma_chan_start(hsuc); 119 + spin_unlock_irqrestore(&hsuc->lock, flags); 120 + } 121 + 122 + static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc) 123 + { 124 + struct virt_dma_desc *vdesc; 125 + 126 + /* Get the next descriptor */ 127 + vdesc = vchan_next_desc(&hsuc->vchan); 128 + if (!vdesc) { 129 + hsuc->desc = NULL; 130 + return; 131 + } 132 + 133 + list_del(&vdesc->node); 134 + hsuc->desc = 
to_hsu_dma_desc(vdesc); 135 + 136 + /* Start the channel with a new descriptor */ 137 + hsu_dma_start_channel(hsuc); 138 + } 139 + 140 + static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc) 141 + { 142 + unsigned long flags; 143 + u32 sr; 144 + 145 + spin_lock_irqsave(&hsuc->lock, flags); 146 + sr = hsu_chan_readl(hsuc, HSU_CH_SR); 147 + spin_unlock_irqrestore(&hsuc->lock, flags); 148 + 149 + return sr; 150 + } 151 + 152 + irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr) 153 + { 154 + struct hsu_dma_chan *hsuc; 155 + struct hsu_dma_desc *desc; 156 + unsigned long flags; 157 + u32 sr; 158 + 159 + /* Sanity check */ 160 + if (nr >= chip->pdata->nr_channels) 161 + return IRQ_NONE; 162 + 163 + hsuc = &chip->hsu->chan[nr]; 164 + 165 + /* 166 + * No matter what situation, need read clear the IRQ status 167 + * There is a bug, see Errata 5, HSD 2900918 168 + */ 169 + sr = hsu_dma_chan_get_sr(hsuc); 170 + if (!sr) 171 + return IRQ_NONE; 172 + 173 + /* Timeout IRQ, need wait some time, see Errata 2 */ 174 + if (hsuc->direction == DMA_DEV_TO_MEM && (sr & HSU_CH_SR_DESCTO_ANY)) 175 + udelay(2); 176 + 177 + sr &= ~HSU_CH_SR_DESCTO_ANY; 178 + if (!sr) 179 + return IRQ_HANDLED; 180 + 181 + spin_lock_irqsave(&hsuc->vchan.lock, flags); 182 + desc = hsuc->desc; 183 + if (desc) { 184 + if (sr & HSU_CH_SR_CHE) { 185 + desc->status = DMA_ERROR; 186 + } else if (desc->active < desc->nents) { 187 + hsu_dma_start_channel(hsuc); 188 + } else { 189 + vchan_cookie_complete(&desc->vdesc); 190 + desc->status = DMA_COMPLETE; 191 + hsu_dma_start_transfer(hsuc); 192 + } 193 + } 194 + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); 195 + 196 + return IRQ_HANDLED; 197 + } 198 + EXPORT_SYMBOL_GPL(hsu_dma_irq); 199 + 200 + static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents) 201 + { 202 + struct hsu_dma_desc *desc; 203 + 204 + desc = kzalloc(sizeof(*desc), GFP_ATOMIC); 205 + if (!desc) 206 + return NULL; 207 + 208 + desc->sg = kcalloc(nents, 
sizeof(*desc->sg), GFP_ATOMIC); 209 + if (!desc->sg) { 210 + kfree(desc); 211 + return NULL; 212 + } 213 + 214 + return desc; 215 + } 216 + 217 + static void hsu_dma_desc_free(struct virt_dma_desc *vdesc) 218 + { 219 + struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc); 220 + 221 + kfree(desc->sg); 222 + kfree(desc); 223 + } 224 + 225 + static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg( 226 + struct dma_chan *chan, struct scatterlist *sgl, 227 + unsigned int sg_len, enum dma_transfer_direction direction, 228 + unsigned long flags, void *context) 229 + { 230 + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); 231 + struct hsu_dma_desc *desc; 232 + struct scatterlist *sg; 233 + unsigned int i; 234 + 235 + desc = hsu_dma_alloc_desc(sg_len); 236 + if (!desc) 237 + return NULL; 238 + 239 + for_each_sg(sgl, sg, sg_len, i) { 240 + desc->sg[i].addr = sg_dma_address(sg); 241 + desc->sg[i].len = sg_dma_len(sg); 242 + } 243 + 244 + desc->nents = sg_len; 245 + desc->direction = direction; 246 + desc->active = 0; 247 + desc->status = DMA_IN_PROGRESS; 248 + 249 + return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags); 250 + } 251 + 252 + static void hsu_dma_issue_pending(struct dma_chan *chan) 253 + { 254 + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); 255 + unsigned long flags; 256 + 257 + spin_lock_irqsave(&hsuc->vchan.lock, flags); 258 + if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc) 259 + hsu_dma_start_transfer(hsuc); 260 + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); 261 + } 262 + 263 + static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc) 264 + { 265 + size_t bytes = 0; 266 + unsigned int i; 267 + 268 + for (i = desc->active; i < desc->nents; i++) 269 + bytes += desc->sg[i].len; 270 + 271 + return bytes; 272 + } 273 + 274 + static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc) 275 + { 276 + struct hsu_dma_desc *desc = hsuc->desc; 277 + size_t bytes = hsu_dma_desc_size(desc); 278 + int i; 279 + unsigned long flags; 280 + 281 + 
spin_lock_irqsave(&hsuc->lock, flags); 282 + i = desc->active % HSU_DMA_CHAN_NR_DESC; 283 + do { 284 + bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i)); 285 + } while (--i >= 0); 286 + spin_unlock_irqrestore(&hsuc->lock, flags); 287 + 288 + return bytes; 289 + } 290 + 291 + static enum dma_status hsu_dma_tx_status(struct dma_chan *chan, 292 + dma_cookie_t cookie, struct dma_tx_state *state) 293 + { 294 + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); 295 + struct virt_dma_desc *vdesc; 296 + enum dma_status status; 297 + size_t bytes; 298 + unsigned long flags; 299 + 300 + status = dma_cookie_status(chan, cookie, state); 301 + if (status == DMA_COMPLETE) 302 + return status; 303 + 304 + spin_lock_irqsave(&hsuc->vchan.lock, flags); 305 + vdesc = vchan_find_desc(&hsuc->vchan, cookie); 306 + if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) { 307 + bytes = hsu_dma_active_desc_size(hsuc); 308 + dma_set_residue(state, bytes); 309 + status = hsuc->desc->status; 310 + } else if (vdesc) { 311 + bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc)); 312 + dma_set_residue(state, bytes); 313 + } 314 + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); 315 + 316 + return status; 317 + } 318 + 319 + static int hsu_dma_slave_config(struct dma_chan *chan, 320 + struct dma_slave_config *config) 321 + { 322 + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); 323 + 324 + /* Check if chan will be configured for slave transfers */ 325 + if (!is_slave_direction(config->direction)) 326 + return -EINVAL; 327 + 328 + memcpy(&hsuc->config, config, sizeof(hsuc->config)); 329 + 330 + return 0; 331 + } 332 + 333 + static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc) 334 + { 335 + unsigned long flags; 336 + 337 + spin_lock_irqsave(&hsuc->lock, flags); 338 + hsu_chan_disable(hsuc); 339 + spin_unlock_irqrestore(&hsuc->lock, flags); 340 + } 341 + 342 + static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc) 343 + { 344 + unsigned long flags; 345 + 346 + 
spin_lock_irqsave(&hsuc->lock, flags); 347 + hsu_chan_enable(hsuc); 348 + spin_unlock_irqrestore(&hsuc->lock, flags); 349 + } 350 + 351 + static int hsu_dma_pause(struct dma_chan *chan) 352 + { 353 + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); 354 + unsigned long flags; 355 + 356 + spin_lock_irqsave(&hsuc->vchan.lock, flags); 357 + if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) { 358 + hsu_dma_chan_deactivate(hsuc); 359 + hsuc->desc->status = DMA_PAUSED; 360 + } 361 + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); 362 + 363 + return 0; 364 + } 365 + 366 + static int hsu_dma_resume(struct dma_chan *chan) 367 + { 368 + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); 369 + unsigned long flags; 370 + 371 + spin_lock_irqsave(&hsuc->vchan.lock, flags); 372 + if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) { 373 + hsuc->desc->status = DMA_IN_PROGRESS; 374 + hsu_dma_chan_activate(hsuc); 375 + } 376 + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); 377 + 378 + return 0; 379 + } 380 + 381 + static int hsu_dma_terminate_all(struct dma_chan *chan) 382 + { 383 + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); 384 + unsigned long flags; 385 + LIST_HEAD(head); 386 + 387 + spin_lock_irqsave(&hsuc->vchan.lock, flags); 388 + 389 + hsu_dma_stop_channel(hsuc); 390 + hsuc->desc = NULL; 391 + 392 + vchan_get_all_descriptors(&hsuc->vchan, &head); 393 + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); 394 + vchan_dma_desc_free_list(&hsuc->vchan, &head); 395 + 396 + return 0; 397 + } 398 + 399 + static int hsu_dma_alloc_chan_resources(struct dma_chan *chan) 400 + { 401 + return 0; 402 + } 403 + 404 + static void hsu_dma_free_chan_resources(struct dma_chan *chan) 405 + { 406 + vchan_free_chan_resources(to_virt_chan(chan)); 407 + } 408 + 409 + int hsu_dma_probe(struct hsu_dma_chip *chip) 410 + { 411 + struct hsu_dma *hsu; 412 + struct hsu_dma_platform_data *pdata = chip->pdata; 413 + void __iomem *addr = chip->regs + chip->offset; 414 + unsigned short i; 415 
+ int ret; 416 + 417 + hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL); 418 + if (!hsu) 419 + return -ENOMEM; 420 + 421 + chip->hsu = hsu; 422 + 423 + if (!pdata) { 424 + pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); 425 + if (!pdata) 426 + return -ENOMEM; 427 + 428 + chip->pdata = pdata; 429 + 430 + /* Guess nr_channels from the IO space length */ 431 + pdata->nr_channels = (chip->length - chip->offset) / 432 + HSU_DMA_CHAN_LENGTH; 433 + } 434 + 435 + hsu->chan = devm_kcalloc(chip->dev, pdata->nr_channels, 436 + sizeof(*hsu->chan), GFP_KERNEL); 437 + if (!hsu->chan) 438 + return -ENOMEM; 439 + 440 + INIT_LIST_HEAD(&hsu->dma.channels); 441 + for (i = 0; i < pdata->nr_channels; i++) { 442 + struct hsu_dma_chan *hsuc = &hsu->chan[i]; 443 + 444 + hsuc->vchan.desc_free = hsu_dma_desc_free; 445 + vchan_init(&hsuc->vchan, &hsu->dma); 446 + 447 + hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; 448 + hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH; 449 + 450 + spin_lock_init(&hsuc->lock); 451 + } 452 + 453 + dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask); 454 + dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask); 455 + 456 + hsu->dma.device_alloc_chan_resources = hsu_dma_alloc_chan_resources; 457 + hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources; 458 + 459 + hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg; 460 + 461 + hsu->dma.device_issue_pending = hsu_dma_issue_pending; 462 + hsu->dma.device_tx_status = hsu_dma_tx_status; 463 + 464 + hsu->dma.device_config = hsu_dma_slave_config; 465 + hsu->dma.device_pause = hsu_dma_pause; 466 + hsu->dma.device_resume = hsu_dma_resume; 467 + hsu->dma.device_terminate_all = hsu_dma_terminate_all; 468 + 469 + hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS; 470 + hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS; 471 + hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 472 + hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 473 + 474 + hsu->dma.dev = chip->dev; 475 + 476 + ret 
= dma_async_device_register(&hsu->dma); 477 + if (ret) 478 + return ret; 479 + 480 + dev_info(chip->dev, "Found HSU DMA, %d channels\n", pdata->nr_channels); 481 + return 0; 482 + } 483 + EXPORT_SYMBOL_GPL(hsu_dma_probe); 484 + 485 + int hsu_dma_remove(struct hsu_dma_chip *chip) 486 + { 487 + struct hsu_dma *hsu = chip->hsu; 488 + unsigned short i; 489 + 490 + dma_async_device_unregister(&hsu->dma); 491 + 492 + for (i = 0; i < chip->pdata->nr_channels; i++) { 493 + struct hsu_dma_chan *hsuc = &hsu->chan[i]; 494 + 495 + tasklet_kill(&hsuc->vchan.task); 496 + } 497 + 498 + return 0; 499 + } 500 + EXPORT_SYMBOL_GPL(hsu_dma_remove); 501 + 502 + MODULE_LICENSE("GPL v2"); 503 + MODULE_DESCRIPTION("High Speed UART DMA core driver"); 504 + MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+118
drivers/dma/hsu/hsu.h
··· 1 + /* 2 + * Driver for the High Speed UART DMA 3 + * 4 + * Copyright (C) 2015 Intel Corporation 5 + * 6 + * Partially based on the bits found in drivers/tty/serial/mfd.c. 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #ifndef __DMA_HSU_H__ 14 + #define __DMA_HSU_H__ 15 + 16 + #include <linux/spinlock.h> 17 + #include <linux/dma/hsu.h> 18 + 19 + #include "../virt-dma.h" 20 + 21 + #define HSU_CH_SR 0x00 /* channel status */ 22 + #define HSU_CH_CR 0x04 /* channel control */ 23 + #define HSU_CH_DCR 0x08 /* descriptor control */ 24 + #define HSU_CH_BSR 0x10 /* FIFO buffer size */ 25 + #define HSU_CH_MTSR 0x14 /* minimum transfer size */ 26 + #define HSU_CH_DxSAR(x) (0x20 + 8 * (x)) /* desc start addr */ 27 + #define HSU_CH_DxTSR(x) (0x24 + 8 * (x)) /* desc transfer size */ 28 + #define HSU_CH_D0SAR 0x20 /* desc 0 start addr */ 29 + #define HSU_CH_D0TSR 0x24 /* desc 0 transfer size */ 30 + #define HSU_CH_D1SAR 0x28 31 + #define HSU_CH_D1TSR 0x2c 32 + #define HSU_CH_D2SAR 0x30 33 + #define HSU_CH_D2TSR 0x34 34 + #define HSU_CH_D3SAR 0x38 35 + #define HSU_CH_D3TSR 0x3c 36 + 37 + #define HSU_DMA_CHAN_NR_DESC 4 38 + #define HSU_DMA_CHAN_LENGTH 0x40 39 + 40 + /* Bits in HSU_CH_SR */ 41 + #define HSU_CH_SR_DESCTO(x) BIT(8 + (x)) 42 + #define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8)) 43 + #define HSU_CH_SR_CHE BIT(15) 44 + 45 + /* Bits in HSU_CH_CR */ 46 + #define HSU_CH_CR_CHA BIT(0) 47 + #define HSU_CH_CR_CHD BIT(1) 48 + 49 + /* Bits in HSU_CH_DCR */ 50 + #define HSU_CH_DCR_DESCA(x) BIT(0 + (x)) 51 + #define HSU_CH_DCR_CHSOD(x) BIT(8 + (x)) 52 + #define HSU_CH_DCR_CHSOTO BIT(14) 53 + #define HSU_CH_DCR_CHSOE BIT(15) 54 + #define HSU_CH_DCR_CHDI(x) BIT(16 + (x)) 55 + #define HSU_CH_DCR_CHEI BIT(23) 56 + #define HSU_CH_DCR_CHTOI(x) BIT(24 + (x)) 57 + 58 + struct hsu_dma_sg { 59 + 
dma_addr_t addr; 60 + unsigned int len; 61 + }; 62 + 63 + struct hsu_dma_desc { 64 + struct virt_dma_desc vdesc; 65 + enum dma_transfer_direction direction; 66 + struct hsu_dma_sg *sg; 67 + unsigned int nents; 68 + unsigned int active; 69 + enum dma_status status; 70 + }; 71 + 72 + static inline struct hsu_dma_desc *to_hsu_dma_desc(struct virt_dma_desc *vdesc) 73 + { 74 + return container_of(vdesc, struct hsu_dma_desc, vdesc); 75 + } 76 + 77 + struct hsu_dma_chan { 78 + struct virt_dma_chan vchan; 79 + 80 + void __iomem *reg; 81 + spinlock_t lock; 82 + 83 + /* hardware configuration */ 84 + enum dma_transfer_direction direction; 85 + struct dma_slave_config config; 86 + 87 + struct hsu_dma_desc *desc; 88 + }; 89 + 90 + static inline struct hsu_dma_chan *to_hsu_dma_chan(struct dma_chan *chan) 91 + { 92 + return container_of(chan, struct hsu_dma_chan, vchan.chan); 93 + } 94 + 95 + static inline u32 hsu_chan_readl(struct hsu_dma_chan *hsuc, int offset) 96 + { 97 + return readl(hsuc->reg + offset); 98 + } 99 + 100 + static inline void hsu_chan_writel(struct hsu_dma_chan *hsuc, int offset, 101 + u32 value) 102 + { 103 + writel(value, hsuc->reg + offset); 104 + } 105 + 106 + struct hsu_dma { 107 + struct dma_device dma; 108 + 109 + /* channels */ 110 + struct hsu_dma_chan *chan; 111 + }; 112 + 113 + static inline struct hsu_dma *to_hsu_dma(struct dma_device *ddev) 114 + { 115 + return container_of(ddev, struct hsu_dma, dma); 116 + } 117 + 118 + #endif /* __DMA_HSU_H__ */
+123
drivers/dma/hsu/pci.c
··· 1 + /* 2 + * PCI driver for the High Speed UART DMA 3 + * 4 + * Copyright (C) 2015 Intel Corporation 5 + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 6 + * 7 + * Partially based on the bits found in drivers/tty/serial/mfd.c. 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + #include <linux/bitops.h> 15 + #include <linux/device.h> 16 + #include <linux/module.h> 17 + #include <linux/pci.h> 18 + 19 + #include "hsu.h" 20 + 21 + #define HSU_PCI_DMASR 0x00 22 + #define HSU_PCI_DMAISR 0x04 23 + 24 + #define HSU_PCI_CHAN_OFFSET 0x100 25 + 26 + static irqreturn_t hsu_pci_irq(int irq, void *dev) 27 + { 28 + struct hsu_dma_chip *chip = dev; 29 + u32 dmaisr; 30 + unsigned short i; 31 + irqreturn_t ret = IRQ_NONE; 32 + 33 + dmaisr = readl(chip->regs + HSU_PCI_DMAISR); 34 + for (i = 0; i < chip->pdata->nr_channels; i++) { 35 + if (dmaisr & 0x1) 36 + ret |= hsu_dma_irq(chip, i); 37 + dmaisr >>= 1; 38 + } 39 + 40 + return ret; 41 + } 42 + 43 + static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 44 + { 45 + struct hsu_dma_chip *chip; 46 + int ret; 47 + 48 + ret = pcim_enable_device(pdev); 49 + if (ret) 50 + return ret; 51 + 52 + ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); 53 + if (ret) { 54 + dev_err(&pdev->dev, "I/O memory remapping failed\n"); 55 + return ret; 56 + } 57 + 58 + pci_set_master(pdev); 59 + pci_try_set_mwi(pdev); 60 + 61 + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 62 + if (ret) 63 + return ret; 64 + 65 + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 66 + if (ret) 67 + return ret; 68 + 69 + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); 70 + if (!chip) 71 + return -ENOMEM; 72 + 73 + chip->dev = &pdev->dev; 74 + chip->regs = pcim_iomap_table(pdev)[0]; 75 + chip->length = pci_resource_len(pdev, 0); 76 + 
chip->offset = HSU_PCI_CHAN_OFFSET; 77 + chip->irq = pdev->irq; 78 + 79 + pci_enable_msi(pdev); 80 + 81 + ret = hsu_dma_probe(chip); 82 + if (ret) 83 + return ret; 84 + 85 + ret = request_irq(chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip); 86 + if (ret) 87 + goto err_register_irq; 88 + 89 + pci_set_drvdata(pdev, chip); 90 + 91 + return 0; 92 + 93 + err_register_irq: 94 + hsu_dma_remove(chip); 95 + return ret; 96 + } 97 + 98 + static void hsu_pci_remove(struct pci_dev *pdev) 99 + { 100 + struct hsu_dma_chip *chip = pci_get_drvdata(pdev); 101 + 102 + free_irq(chip->irq, chip); 103 + hsu_dma_remove(chip); 104 + } 105 + 106 + static const struct pci_device_id hsu_pci_id_table[] = { 107 + { PCI_VDEVICE(INTEL, 0x081e), 0 }, 108 + { } 109 + }; 110 + MODULE_DEVICE_TABLE(pci, hsu_pci_id_table); 111 + 112 + static struct pci_driver hsu_pci_driver = { 113 + .name = "hsu_dma_pci", 114 + .id_table = hsu_pci_id_table, 115 + .probe = hsu_pci_probe, 116 + .remove = hsu_pci_remove, 117 + }; 118 + 119 + module_pci_driver(hsu_pci_driver); 120 + 121 + MODULE_LICENSE("GPL v2"); 122 + MODULE_DESCRIPTION("High Speed UART DMA PCI driver"); 123 + MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+2 -2
drivers/tty/serial/8250/8250_core.c
··· 895 895 /* 896 896 * Exar ST16C2550 "A2" devices incorrectly detect as 897 897 * having an EFR, and report an ID of 0x0201. See 898 - * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html 898 + * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html 899 899 */ 900 900 if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16) 901 901 return 1; ··· 1260 1260 serial_out(up, UART_LCR, save_lcr); 1261 1261 1262 1262 port->fifosize = uart_config[up->port.type].fifo_size; 1263 - old_capabilities = up->capabilities; 1263 + old_capabilities = up->capabilities; 1264 1264 up->capabilities = uart_config[port->type].flags; 1265 1265 up->tx_loadsz = uart_config[port->type].tx_loadsz; 1266 1266
+11 -4
drivers/tty/serial/8250/8250_dw.c
··· 416 416 { 417 417 struct uart_8250_port uart = {}; 418 418 struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 419 - struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 419 + int irq = platform_get_irq(pdev, 0); 420 420 struct dw8250_data *data; 421 421 int err; 422 422 423 - if (!regs || !irq) { 424 - dev_err(&pdev->dev, "no registers/irq defined\n"); 423 + if (!regs) { 424 + dev_err(&pdev->dev, "no registers defined\n"); 425 425 return -EINVAL; 426 + } 427 + 428 + if (irq < 0) { 429 + if (irq != -EPROBE_DEFER) 430 + dev_err(&pdev->dev, "cannot get irq\n"); 431 + return irq; 426 432 } 427 433 428 434 spin_lock_init(&uart.port.lock); 429 435 uart.port.mapbase = regs->start; 430 - uart.port.irq = irq->start; 436 + uart.port.irq = irq; 431 437 uart.port.handle_irq = dw8250_handle_irq; 432 438 uart.port.pm = dw8250_do_pm; 433 439 uart.port.type = PORT_8250; ··· 646 640 MODULE_AUTHOR("Jamie Iles"); 647 641 MODULE_LICENSE("GPL"); 648 642 MODULE_DESCRIPTION("Synopsys DesignWare 8250 serial port driver"); 643 + MODULE_ALIAS("platform:dw-apb-uart");
+198 -16
drivers/tty/serial/8250/8250_pci.c
··· 27 27 28 28 #include <linux/dmaengine.h> 29 29 #include <linux/platform_data/dma-dw.h> 30 + #include <linux/platform_data/dma-hsu.h> 30 31 31 32 #include "8250.h" 32 33 ··· 1526 1525 return ret; 1527 1526 } 1528 1527 1528 + #define INTEL_MID_UART_PS 0x30 1529 + #define INTEL_MID_UART_MUL 0x34 1530 + 1531 + static void intel_mid_set_termios_50M(struct uart_port *p, 1532 + struct ktermios *termios, 1533 + struct ktermios *old) 1534 + { 1535 + unsigned int baud = tty_termios_baud_rate(termios); 1536 + u32 ps, mul; 1537 + 1538 + /* 1539 + * The uart clk is 50Mhz, and the baud rate come from: 1540 + * baud = 50M * MUL / (DIV * PS * DLAB) 1541 + * 1542 + * For those basic low baud rate we can get the direct 1543 + * scalar from 2746800, like 115200 = 2746800/24. For those 1544 + * higher baud rate, we handle them case by case, mainly by 1545 + * adjusting the MUL/PS registers, and DIV register is kept 1546 + * as default value 0x3d09 to make things simple. 1547 + */ 1548 + 1549 + ps = 0x10; 1550 + 1551 + switch (baud) { 1552 + case 500000: 1553 + case 1000000: 1554 + case 1500000: 1555 + case 3000000: 1556 + mul = 0x3a98; 1557 + p->uartclk = 48000000; 1558 + break; 1559 + case 2000000: 1560 + case 4000000: 1561 + mul = 0x2710; 1562 + ps = 0x08; 1563 + p->uartclk = 64000000; 1564 + break; 1565 + case 2500000: 1566 + mul = 0x30d4; 1567 + p->uartclk = 40000000; 1568 + break; 1569 + case 3500000: 1570 + mul = 0x3345; 1571 + ps = 0x0c; 1572 + p->uartclk = 56000000; 1573 + break; 1574 + default: 1575 + mul = 0x2400; 1576 + p->uartclk = 29491200; 1577 + } 1578 + 1579 + writel(ps, p->membase + INTEL_MID_UART_PS); /* set PS */ 1580 + writel(mul, p->membase + INTEL_MID_UART_MUL); /* set MUL */ 1581 + 1582 + serial8250_do_set_termios(p, termios, old); 1583 + } 1584 + 1585 + static bool intel_mid_dma_filter(struct dma_chan *chan, void *param) 1586 + { 1587 + struct hsu_dma_slave *s = param; 1588 + 1589 + if (s->dma_dev != chan->device->dev || s->chan_id != chan->chan_id) 1590 + 
return false; 1591 + 1592 + chan->private = s; 1593 + return true; 1594 + } 1595 + 1596 + static int intel_mid_serial_setup(struct serial_private *priv, 1597 + const struct pciserial_board *board, 1598 + struct uart_8250_port *port, int idx, 1599 + int index, struct pci_dev *dma_dev) 1600 + { 1601 + struct device *dev = port->port.dev; 1602 + struct uart_8250_dma *dma; 1603 + struct hsu_dma_slave *tx_param, *rx_param; 1604 + 1605 + dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); 1606 + if (!dma) 1607 + return -ENOMEM; 1608 + 1609 + tx_param = devm_kzalloc(dev, sizeof(*tx_param), GFP_KERNEL); 1610 + if (!tx_param) 1611 + return -ENOMEM; 1612 + 1613 + rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL); 1614 + if (!rx_param) 1615 + return -ENOMEM; 1616 + 1617 + rx_param->chan_id = index * 2 + 1; 1618 + tx_param->chan_id = index * 2; 1619 + 1620 + dma->rxconf.src_maxburst = 64; 1621 + dma->txconf.dst_maxburst = 64; 1622 + 1623 + rx_param->dma_dev = &dma_dev->dev; 1624 + tx_param->dma_dev = &dma_dev->dev; 1625 + 1626 + dma->fn = intel_mid_dma_filter; 1627 + dma->rx_param = rx_param; 1628 + dma->tx_param = tx_param; 1629 + 1630 + port->port.type = PORT_16750; 1631 + port->port.flags |= UPF_FIXED_PORT | UPF_FIXED_TYPE; 1632 + port->dma = dma; 1633 + 1634 + return pci_default_setup(priv, board, port, idx); 1635 + } 1636 + 1637 + #define PCI_DEVICE_ID_INTEL_PNW_UART1 0x081b 1638 + #define PCI_DEVICE_ID_INTEL_PNW_UART2 0x081c 1639 + #define PCI_DEVICE_ID_INTEL_PNW_UART3 0x081d 1640 + 1641 + static int pnw_serial_setup(struct serial_private *priv, 1642 + const struct pciserial_board *board, 1643 + struct uart_8250_port *port, int idx) 1644 + { 1645 + struct pci_dev *pdev = priv->dev; 1646 + struct pci_dev *dma_dev; 1647 + int index; 1648 + 1649 + switch (pdev->device) { 1650 + case PCI_DEVICE_ID_INTEL_PNW_UART1: 1651 + index = 0; 1652 + break; 1653 + case PCI_DEVICE_ID_INTEL_PNW_UART2: 1654 + index = 1; 1655 + break; 1656 + case PCI_DEVICE_ID_INTEL_PNW_UART3: 1657 
+ index = 2; 1658 + break; 1659 + default: 1660 + return -EINVAL; 1661 + } 1662 + 1663 + dma_dev = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 3)); 1664 + 1665 + port->port.set_termios = intel_mid_set_termios_50M; 1666 + 1667 + return intel_mid_serial_setup(priv, board, port, idx, index, dma_dev); 1668 + } 1669 + 1529 1670 static int 1530 1671 pci_omegapci_setup(struct serial_private *priv, 1531 1672 const struct pciserial_board *board, ··· 2129 1986 .subvendor = PCI_ANY_ID, 2130 1987 .subdevice = PCI_ANY_ID, 2131 1988 .setup = byt_serial_setup, 1989 + }, 1990 + { 1991 + .vendor = PCI_VENDOR_ID_INTEL, 1992 + .device = PCI_DEVICE_ID_INTEL_PNW_UART1, 1993 + .subvendor = PCI_ANY_ID, 1994 + .subdevice = PCI_ANY_ID, 1995 + .setup = pnw_serial_setup, 1996 + }, 1997 + { 1998 + .vendor = PCI_VENDOR_ID_INTEL, 1999 + .device = PCI_DEVICE_ID_INTEL_PNW_UART2, 2000 + .subvendor = PCI_ANY_ID, 2001 + .subdevice = PCI_ANY_ID, 2002 + .setup = pnw_serial_setup, 2003 + }, 2004 + { 2005 + .vendor = PCI_VENDOR_ID_INTEL, 2006 + .device = PCI_DEVICE_ID_INTEL_PNW_UART3, 2007 + .subvendor = PCI_ANY_ID, 2008 + .subdevice = PCI_ANY_ID, 2009 + .setup = pnw_serial_setup, 2132 2010 }, 2133 2011 { 2134 2012 .vendor = PCI_VENDOR_ID_INTEL, ··· 3028 2864 pbn_ADDIDATA_PCIe_8_3906250, 3029 2865 pbn_ce4100_1_115200, 3030 2866 pbn_byt, 2867 + pbn_pnw, 3031 2868 pbn_qrk, 3032 2869 pbn_omegapci, 3033 2870 pbn_NETMOS9900_2s_115200, ··· 3795 3630 .uart_offset = 0x80, 3796 3631 .reg_shift = 2, 3797 3632 }, 3633 + [pbn_pnw] = { 3634 + .flags = FL_BASE0, 3635 + .num_ports = 1, 3636 + .base_baud = 115200, 3637 + }, 3798 3638 [pbn_qrk] = { 3799 3639 .flags = FL_BASE0, 3800 3640 .num_ports = 1, ··· 4176 4006 pci_disable_device(dev); 4177 4007 } 4178 4008 4179 - #ifdef CONFIG_PM 4180 - static int pciserial_suspend_one(struct pci_dev *dev, pm_message_t state) 4009 + #ifdef CONFIG_PM_SLEEP 4010 + static int pciserial_suspend_one(struct device *dev) 4181 4011 { 4182 - struct serial_private *priv = 
pci_get_drvdata(dev); 4012 + struct pci_dev *pdev = to_pci_dev(dev); 4013 + struct serial_private *priv = pci_get_drvdata(pdev); 4183 4014 4184 4015 if (priv) 4185 4016 pciserial_suspend_ports(priv); 4186 4017 4187 - pci_save_state(dev); 4188 - pci_set_power_state(dev, pci_choose_state(dev, state)); 4189 4018 return 0; 4190 4019 } 4191 4020 4192 - static int pciserial_resume_one(struct pci_dev *dev) 4021 + static int pciserial_resume_one(struct device *dev) 4193 4022 { 4023 + struct pci_dev *pdev = to_pci_dev(dev); 4024 + struct serial_private *priv = pci_get_drvdata(pdev); 4194 4025 int err; 4195 - struct serial_private *priv = pci_get_drvdata(dev); 4196 - 4197 - pci_set_power_state(dev, PCI_D0); 4198 - pci_restore_state(dev); 4199 4026 4200 4027 if (priv) { 4201 4028 /* 4202 4029 * The device may have been disabled. Re-enable it. 4203 4030 */ 4204 - err = pci_enable_device(dev); 4031 + err = pci_enable_device(pdev); 4205 4032 /* FIXME: We cannot simply error out here */ 4206 4033 if (err) 4207 - dev_err(&dev->dev, "Unable to re-enable ports, trying to continue.\n"); 4034 + dev_err(dev, "Unable to re-enable ports, trying to continue.\n"); 4208 4035 pciserial_resume_ports(priv); 4209 4036 } 4210 4037 return 0; 4211 4038 } 4212 4039 #endif 4040 + 4041 + static SIMPLE_DEV_PM_OPS(pciserial_pm_ops, pciserial_suspend_one, 4042 + pciserial_resume_one); 4213 4043 4214 4044 static struct pci_device_id serial_pci_tbl[] = { 4215 4045 /* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */ ··· 5533 5363 pbn_byt }, 5534 5364 5535 5365 /* 5366 + * Intel Penwell 5367 + */ 5368 + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PNW_UART1, 5369 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5370 + pbn_pnw}, 5371 + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PNW_UART2, 5372 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5373 + pbn_pnw}, 5374 + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PNW_UART3, 5375 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5376 + pbn_pnw}, 5377 + 5378 + /* 5536 5379 * Intel Quark 
x1000 5537 5380 */ 5538 5381 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_UART, ··· 5693 5510 .name = "serial", 5694 5511 .probe = pciserial_init_one, 5695 5512 .remove = pciserial_remove_one, 5696 - #ifdef CONFIG_PM 5697 - .suspend = pciserial_suspend_one, 5698 - .resume = pciserial_resume_one, 5699 - #endif 5513 + .driver = { 5514 + .pm = &pciserial_pm_ops, 5515 + }, 5700 5516 .id_table = serial_pci_tbl, 5701 5517 .err_handler = &serial8250_err_handler, 5702 5518 };
+3 -13
drivers/tty/serial/Kconfig
··· 20 20 21 21 config SERIAL_AMBA_PL010 22 22 tristate "ARM AMBA PL010 serial port support" 23 - depends on ARM_AMBA && (BROKEN || !ARCH_VERSATILE) 23 + depends on ARM_AMBA 24 24 select SERIAL_CORE 25 25 help 26 26 This selects the ARM(R) AMBA(R) PrimeCell PL010 UART. If you have ··· 483 483 your boot loader (lilo or loadlin) about how to pass options to the 484 484 kernel at boot time.) 485 485 486 - config SERIAL_MFD_HSU 487 - tristate "Medfield High Speed UART support" 488 - depends on PCI 489 - select SERIAL_CORE 490 - 491 - config SERIAL_MFD_HSU_CONSOLE 492 - bool "Medfile HSU serial console support" 493 - depends on SERIAL_MFD_HSU=y 494 - select SERIAL_CORE_CONSOLE 495 - 496 486 config SERIAL_BFIN 497 487 tristate "Blackfin serial port support" 498 488 depends on BLACKFIN ··· 825 835 826 836 config SERIAL_PMACZILOG 827 837 tristate "Mac or PowerMac z85c30 ESCC support" 828 - depends on (M68K && MAC) || (PPC_OF && PPC_PMAC) 838 + depends on (M68K && MAC) || PPC_PMAC 829 839 select SERIAL_CORE 830 840 help 831 841 This driver supports the Zilog z85C30 serial ports found on ··· 1143 1153 1144 1154 config SERIAL_OF_PLATFORM_NWPSERIAL 1145 1155 tristate "NWP serial port driver" 1146 - depends on PPC_OF && PPC_DCR 1156 + depends on PPC_DCR 1147 1157 select SERIAL_OF_PLATFORM 1148 1158 select SERIAL_CORE_CONSOLE 1149 1159 select SERIAL_CORE
-1
drivers/tty/serial/Makefile
··· 78 78 obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o 79 79 obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o 80 80 obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o 81 - obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o 82 81 obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o 83 82 obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o 84 83 obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
+130 -105
drivers/tty/serial/amba-pl011.c
··· 58 58 #include <linux/pinctrl/consumer.h> 59 59 #include <linux/sizes.h> 60 60 #include <linux/io.h> 61 + #include <linux/workqueue.h> 61 62 62 63 #define UART_NR 14 63 64 ··· 157 156 unsigned int lcrh_tx; /* vendor-specific */ 158 157 unsigned int lcrh_rx; /* vendor-specific */ 159 158 unsigned int old_cr; /* state during shutdown */ 159 + struct delayed_work tx_softirq_work; 160 160 bool autorts; 161 + unsigned int tx_irq_seen; /* 0=none, 1=1, 2=2 or more */ 161 162 char type[12]; 162 163 #ifdef CONFIG_DMA_ENGINE 163 164 /* DMA stuff */ ··· 167 164 bool using_rx_dma; 168 165 struct pl011_dmarx_data dmarx; 169 166 struct pl011_dmatx_data dmatx; 167 + bool dma_probed; 170 168 #endif 171 169 }; 172 170 ··· 265 261 } 266 262 } 267 263 268 - static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *uap) 264 + static void pl011_dma_probe(struct uart_amba_port *uap) 269 265 { 270 266 /* DMA is the sole user of the platform data right now */ 271 267 struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev); 268 + struct device *dev = uap->port.dev; 272 269 struct dma_slave_config tx_conf = { 273 270 .dst_addr = uap->port.mapbase + UART01x_DR, 274 271 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, ··· 280 275 struct dma_chan *chan; 281 276 dma_cap_mask_t mask; 282 277 283 - chan = dma_request_slave_channel(dev, "tx"); 278 + uap->dma_probed = true; 279 + chan = dma_request_slave_channel_reason(dev, "tx"); 280 + if (IS_ERR(chan)) { 281 + if (PTR_ERR(chan) == -EPROBE_DEFER) { 282 + dev_info(uap->port.dev, "DMA driver not ready\n"); 283 + uap->dma_probed = false; 284 + return; 285 + } 284 286 285 - if (!chan) { 286 287 /* We need platform data */ 287 288 if (!plat || !plat->dma_filter) { 288 289 dev_info(uap->port.dev, "no DMA platform data\n"); ··· 396 385 } 397 386 } 398 387 399 - #ifndef MODULE 400 - /* 401 - * Stack up the UARTs and let the above initcall be done at device 402 - * initcall time, because the serial driver is called as an arch 
403 - * initcall, and at this time the DMA subsystem is not yet registered. 404 - * At this point the driver will switch over to using DMA where desired. 405 - */ 406 - struct dma_uap { 407 - struct list_head node; 408 - struct uart_amba_port *uap; 409 - struct device *dev; 410 - }; 411 - 412 - static LIST_HEAD(pl011_dma_uarts); 413 - 414 - static int __init pl011_dma_initcall(void) 415 - { 416 - struct list_head *node, *tmp; 417 - 418 - list_for_each_safe(node, tmp, &pl011_dma_uarts) { 419 - struct dma_uap *dmau = list_entry(node, struct dma_uap, node); 420 - pl011_dma_probe_initcall(dmau->dev, dmau->uap); 421 - list_del(node); 422 - kfree(dmau); 423 - } 424 - return 0; 425 - } 426 - 427 - device_initcall(pl011_dma_initcall); 428 - 429 - static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap) 430 - { 431 - struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL); 432 - if (dmau) { 433 - dmau->uap = uap; 434 - dmau->dev = dev; 435 - list_add_tail(&dmau->node, &pl011_dma_uarts); 436 - } 437 - } 438 - #else 439 - static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap) 440 - { 441 - pl011_dma_probe_initcall(dev, uap); 442 - } 443 - #endif 444 - 445 388 static void pl011_dma_remove(struct uart_amba_port *uap) 446 389 { 447 - /* TODO: remove the initcall if it has not yet executed */ 448 390 if (uap->dmatx.chan) 449 391 dma_release_channel(uap->dmatx.chan); 450 392 if (uap->dmarx.chan) 451 393 dma_release_channel(uap->dmarx.chan); 452 394 } 453 395 454 - /* Forward declare this for the refill routine */ 396 + /* Forward declare these for the refill routine */ 455 397 static int pl011_dma_tx_refill(struct uart_amba_port *uap); 398 + static void pl011_start_tx_pio(struct uart_amba_port *uap); 456 399 457 400 /* 458 401 * The current DMA TX buffer has been sent. 
··· 444 479 return; 445 480 } 446 481 447 - if (pl011_dma_tx_refill(uap) <= 0) { 482 + if (pl011_dma_tx_refill(uap) <= 0) 448 483 /* 449 484 * We didn't queue a DMA buffer for some reason, but we 450 485 * have data pending to be sent. Re-enable the TX IRQ. 451 486 */ 452 - uap->im |= UART011_TXIM; 453 - writew(uap->im, uap->port.membase + UART011_IMSC); 454 - } 487 + pl011_start_tx_pio(uap); 488 + 455 489 spin_unlock_irqrestore(&uap->port.lock, flags); 456 490 } 457 491 ··· 628 664 if (!uap->dmatx.queued) { 629 665 if (pl011_dma_tx_refill(uap) > 0) { 630 666 uap->im &= ~UART011_TXIM; 631 - ret = true; 632 - } else { 633 - uap->im |= UART011_TXIM; 667 + writew(uap->im, uap->port.membase + 668 + UART011_IMSC); 669 + } else 634 670 ret = false; 635 - } 636 - writew(uap->im, uap->port.membase + UART011_IMSC); 637 671 } else if (!(uap->dmacr & UART011_TXDMAE)) { 638 672 uap->dmacr |= UART011_TXDMAE; 639 673 writew(uap->dmacr, ··· 983 1021 { 984 1022 int ret; 985 1023 1024 + if (!uap->dma_probed) 1025 + pl011_dma_probe(uap); 1026 + 986 1027 if (!uap->dmatx.chan) 987 1028 return; 988 1029 ··· 1107 1142 1108 1143 #else 1109 1144 /* Blank functions if the DMA engine is not available */ 1110 - static inline void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap) 1145 + static inline void pl011_dma_probe(struct uart_amba_port *uap) 1111 1146 { 1112 1147 } 1113 1148 ··· 1173 1208 pl011_dma_tx_stop(uap); 1174 1209 } 1175 1210 1211 + static bool pl011_tx_chars(struct uart_amba_port *uap); 1212 + 1213 + /* Start TX with programmed I/O only (no DMA) */ 1214 + static void pl011_start_tx_pio(struct uart_amba_port *uap) 1215 + { 1216 + uap->im |= UART011_TXIM; 1217 + writew(uap->im, uap->port.membase + UART011_IMSC); 1218 + if (!uap->tx_irq_seen) 1219 + pl011_tx_chars(uap); 1220 + } 1221 + 1176 1222 static void pl011_start_tx(struct uart_port *port) 1177 1223 { 1178 1224 struct uart_amba_port *uap = 1179 1225 container_of(port, struct uart_amba_port, port); 1180 1226 
1181 - if (!pl011_dma_tx_start(uap)) { 1182 - uap->im |= UART011_TXIM; 1183 - writew(uap->im, uap->port.membase + UART011_IMSC); 1184 - } 1227 + if (!pl011_dma_tx_start(uap)) 1228 + pl011_start_tx_pio(uap); 1185 1229 } 1186 1230 1187 1231 static void pl011_stop_rx(struct uart_port *port) ··· 1248 1274 spin_lock(&uap->port.lock); 1249 1275 } 1250 1276 1251 - static void pl011_tx_chars(struct uart_amba_port *uap) 1277 + /* 1278 + * Transmit a character 1279 + * There must be at least one free entry in the TX FIFO to accept the char. 1280 + * 1281 + * Returns true if the FIFO might have space in it afterwards; 1282 + * returns false if the FIFO definitely became full. 1283 + */ 1284 + static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c) 1285 + { 1286 + writew(c, uap->port.membase + UART01x_DR); 1287 + uap->port.icount.tx++; 1288 + 1289 + if (likely(uap->tx_irq_seen > 1)) 1290 + return true; 1291 + 1292 + return !(readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF); 1293 + } 1294 + 1295 + static bool pl011_tx_chars(struct uart_amba_port *uap) 1252 1296 { 1253 1297 struct circ_buf *xmit = &uap->port.state->xmit; 1254 1298 int count; 1255 1299 1300 + if (unlikely(uap->tx_irq_seen < 2)) 1301 + /* 1302 + * Initial FIFO fill level unknown: we must check TXFF 1303 + * after each write, so just try to fill up the FIFO. 1304 + */ 1305 + count = uap->fifosize; 1306 + else /* tx_irq_seen >= 2 */ 1307 + /* 1308 + * FIFO initially at least half-empty, so we can simply 1309 + * write half the FIFO without polling TXFF. 1310 + 1311 + * Note: the *first* TX IRQ can still race with 1312 + * pl011_start_tx_pio(), which can result in the FIFO 1313 + * being fuller than expected in that case. 
1314 + */ 1315 + count = uap->fifosize >> 1; 1316 + 1317 + /* 1318 + * If the FIFO is full we're guaranteed a TX IRQ at some later point, 1319 + * and can't transmit immediately in any case: 1320 + */ 1321 + if (unlikely(uap->tx_irq_seen < 2 && 1322 + readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)) 1323 + return false; 1324 + 1256 1325 if (uap->port.x_char) { 1257 - writew(uap->port.x_char, uap->port.membase + UART01x_DR); 1258 - uap->port.icount.tx++; 1326 + pl011_tx_char(uap, uap->port.x_char); 1259 1327 uap->port.x_char = 0; 1260 - return; 1328 + --count; 1261 1329 } 1262 1330 if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) { 1263 1331 pl011_stop_tx(&uap->port); 1264 - return; 1332 + goto done; 1265 1333 } 1266 1334 1267 1335 /* If we are using DMA mode, try to send some characters. */ 1268 1336 if (pl011_dma_tx_irq(uap)) 1269 - return; 1337 + goto done; 1270 1338 1271 - count = uap->fifosize >> 1; 1272 - do { 1273 - writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR); 1339 + while (count-- > 0 && pl011_tx_char(uap, xmit->buf[xmit->tail])) { 1274 1340 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 1275 - uap->port.icount.tx++; 1276 1341 if (uart_circ_empty(xmit)) 1277 1342 break; 1278 - } while (--count > 0); 1343 + } 1279 1344 1280 1345 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1281 1346 uart_write_wakeup(&uap->port); 1282 1347 1283 - if (uart_circ_empty(xmit)) 1348 + if (uart_circ_empty(xmit)) { 1284 1349 pl011_stop_tx(&uap->port); 1350 + goto done; 1351 + } 1352 + 1353 + if (unlikely(!uap->tx_irq_seen)) 1354 + schedule_delayed_work(&uap->tx_softirq_work, uap->port.timeout); 1355 + 1356 + done: 1357 + return false; 1285 1358 } 1286 1359 1287 1360 static void pl011_modem_status(struct uart_amba_port *uap) ··· 1353 1332 uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS); 1354 1333 1355 1334 wake_up_interruptible(&uap->port.state->port.delta_msr_wait); 1335 + } 1336 + 1337 + static void pl011_tx_softirq(struct 
work_struct *work) 1338 + { 1339 + struct delayed_work *dwork = to_delayed_work(work); 1340 + struct uart_amba_port *uap = 1341 + container_of(dwork, struct uart_amba_port, tx_softirq_work); 1342 + 1343 + spin_lock(&uap->port.lock); 1344 + while (pl011_tx_chars(uap)) ; 1345 + spin_unlock(&uap->port.lock); 1346 + } 1347 + 1348 + static void pl011_tx_irq_seen(struct uart_amba_port *uap) 1349 + { 1350 + if (likely(uap->tx_irq_seen > 1)) 1351 + return; 1352 + 1353 + uap->tx_irq_seen++; 1354 + if (uap->tx_irq_seen < 2) 1355 + /* first TX IRQ */ 1356 + cancel_delayed_work(&uap->tx_softirq_work); 1356 1357 } 1357 1358 1358 1359 static irqreturn_t pl011_int(int irq, void *dev_id) ··· 1415 1372 if (status & (UART011_DSRMIS|UART011_DCDMIS| 1416 1373 UART011_CTSMIS|UART011_RIMIS)) 1417 1374 pl011_modem_status(uap); 1418 - if (status & UART011_TXIS) 1375 + if (status & UART011_TXIS) { 1376 + pl011_tx_irq_seen(uap); 1419 1377 pl011_tx_chars(uap); 1378 + } 1420 1379 1421 1380 if (pass_counter-- == 0) 1422 1381 break; ··· 1622 1577 { 1623 1578 struct uart_amba_port *uap = 1624 1579 container_of(port, struct uart_amba_port, port); 1625 - unsigned int cr, lcr_h, fbrd, ibrd; 1580 + unsigned int cr; 1626 1581 int retval; 1627 1582 1628 1583 retval = pl011_hwinit(port); ··· 1640 1595 1641 1596 writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS); 1642 1597 1643 - /* 1644 - * Provoke TX FIFO interrupt into asserting. Taking care to preserve 1645 - * baud rate and data format specified by FBRD, IBRD and LCRH as the 1646 - * UART may already be in use as a console. 
1647 - */ 1648 1598 spin_lock_irq(&uap->port.lock); 1649 - 1650 - fbrd = readw(uap->port.membase + UART011_FBRD); 1651 - ibrd = readw(uap->port.membase + UART011_IBRD); 1652 - lcr_h = readw(uap->port.membase + uap->lcrh_rx); 1653 - 1654 - cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE; 1655 - writew(cr, uap->port.membase + UART011_CR); 1656 - writew(0, uap->port.membase + UART011_FBRD); 1657 - writew(1, uap->port.membase + UART011_IBRD); 1658 - pl011_write_lcr_h(uap, 0); 1659 - writew(0, uap->port.membase + UART01x_DR); 1660 - while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY) 1661 - barrier(); 1662 - 1663 - writew(fbrd, uap->port.membase + UART011_FBRD); 1664 - writew(ibrd, uap->port.membase + UART011_IBRD); 1665 - pl011_write_lcr_h(uap, lcr_h); 1666 1599 1667 1600 /* restore RTS and DTR */ 1668 1601 cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR); ··· 1695 1672 container_of(port, struct uart_amba_port, port); 1696 1673 unsigned int cr; 1697 1674 1675 + cancel_delayed_work_sync(&uap->tx_softirq_work); 1676 + 1698 1677 /* 1699 1678 * disable all interrupts 1700 1679 */ 1701 1680 spin_lock_irq(&uap->port.lock); 1702 1681 uap->im = 0; 1703 1682 writew(uap->im, uap->port.membase + UART011_IMSC); 1704 - writew(0xffff, uap->port.membase + UART011_ICR); 1683 + writew(0xffff & ~UART011_TXIS, uap->port.membase + UART011_ICR); 1705 1684 spin_unlock_irq(&uap->port.lock); 1706 1685 1707 1686 pl011_dma_shutdown(uap); ··· 2243 2218 uap->port.ops = &amba_pl011_pops; 2244 2219 uap->port.flags = UPF_BOOT_AUTOCONF; 2245 2220 uap->port.line = i; 2246 - pl011_dma_probe(&dev->dev, uap); 2221 + INIT_DELAYED_WORK(&uap->tx_softirq_work, pl011_tx_softirq); 2247 2222 2248 2223 /* Ensure interrupts from this UART are masked and cleared */ 2249 2224 writew(0, uap->port.membase + UART011_IMSC); ··· 2258 2233 if (!amba_reg.state) { 2259 2234 ret = uart_register_driver(&amba_reg); 2260 2235 if (ret < 0) { 2261 - pr_err("Failed to register AMBA-PL011 driver\n"); 2236 
+ dev_err(&dev->dev, 2237 + "Failed to register AMBA-PL011 driver\n"); 2262 2238 return ret; 2263 2239 } 2264 2240 } ··· 2268 2242 if (ret) { 2269 2243 amba_ports[i] = NULL; 2270 2244 uart_unregister_driver(&amba_reg); 2271 - pl011_dma_remove(uap); 2272 2245 } 2273 2246 2274 2247 return ret;
+1 -1
drivers/tty/serial/ar933x_uart.c
··· 649 649 id = 0; 650 650 } 651 651 652 - if (id > CONFIG_SERIAL_AR933X_NR_UARTS) 652 + if (id >= CONFIG_SERIAL_AR933X_NR_UARTS) 653 653 return -EINVAL; 654 654 655 655 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+17 -12
drivers/tty/serial/atmel_serial.c
··· 855 855 spin_lock_init(&atmel_port->lock_tx); 856 856 sg_init_table(&atmel_port->sg_tx, 1); 857 857 /* UART circular tx buffer is an aligned page. */ 858 - BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK); 858 + BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf)); 859 859 sg_set_page(&atmel_port->sg_tx, 860 860 virt_to_page(port->state->xmit.buf), 861 861 UART_XMIT_SIZE, ··· 1034 1034 spin_lock_init(&atmel_port->lock_rx); 1035 1035 sg_init_table(&atmel_port->sg_rx, 1); 1036 1036 /* UART circular rx buffer is an aligned page. */ 1037 - BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK); 1037 + BUG_ON(!PAGE_ALIGNED(ring->buf)); 1038 1038 sg_set_page(&atmel_port->sg_rx, 1039 1039 virt_to_page(ring->buf), 1040 - ATMEL_SERIAL_RINGSIZE, 1040 + sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE, 1041 1041 (int)ring->buf & ~PAGE_MASK); 1042 1042 nent = dma_map_sg(port->dev, 1043 1043 &atmel_port->sg_rx, ··· 1554 1554 spin_unlock(&port->lock); 1555 1555 } 1556 1556 1557 - static int atmel_init_property(struct atmel_uart_port *atmel_port, 1557 + static void atmel_init_property(struct atmel_uart_port *atmel_port, 1558 1558 struct platform_device *pdev) 1559 1559 { 1560 1560 struct device_node *np = pdev->dev.of_node; ··· 1595 1595 atmel_port->use_dma_tx = false; 1596 1596 } 1597 1597 1598 - return 0; 1599 1598 } 1600 1599 1601 1600 static void atmel_init_rs485(struct uart_port *port, ··· 1776 1777 if (retval) 1777 1778 goto free_irq; 1778 1779 1780 + tasklet_enable(&atmel_port->tasklet); 1781 + 1779 1782 /* 1780 1783 * Initialize DMA (if necessary) 1781 1784 */ 1782 1785 atmel_init_property(atmel_port, pdev); 1786 + atmel_set_ops(port); 1783 1787 1784 1788 if (atmel_port->prepare_rx) { 1785 1789 retval = atmel_port->prepare_rx(port); ··· 1881 1879 * Clear out any scheduled tasklets before 1882 1880 * we destroy the buffers 1883 1881 */ 1882 + tasklet_disable(&atmel_port->tasklet); 1884 1883 tasklet_kill(&atmel_port->tasklet); 1885 1884 1886 1885 /* ··· 2259 2256 struct uart_port 
*port = &atmel_port->uart; 2260 2257 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev); 2261 2258 2262 - if (!atmel_init_property(atmel_port, pdev)) 2263 - atmel_set_ops(port); 2259 + atmel_init_property(atmel_port, pdev); 2260 + atmel_set_ops(port); 2264 2261 2265 2262 atmel_init_rs485(port, pdev); 2266 2263 ··· 2275 2272 2276 2273 tasklet_init(&atmel_port->tasklet, atmel_tasklet_func, 2277 2274 (unsigned long)port); 2275 + tasklet_disable(&atmel_port->tasklet); 2278 2276 2279 2277 memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring)); 2280 2278 ··· 2585 2581 struct gpio_desc *gpiod; 2586 2582 2587 2583 p->gpios = mctrl_gpio_init(dev, 0); 2588 - if (IS_ERR_OR_NULL(p->gpios)) 2589 - return -1; 2584 + if (IS_ERR(p->gpios)) 2585 + return PTR_ERR(p->gpios); 2590 2586 2591 2587 for (i = 0; i < UART_GPIO_MAX; i++) { 2592 2588 gpiod = mctrl_gpio_to_gpiod(p->gpios, i); ··· 2639 2635 spin_lock_init(&port->lock_suspended); 2640 2636 2641 2637 ret = atmel_init_gpios(port, &pdev->dev); 2642 - if (ret < 0) 2643 - dev_err(&pdev->dev, "%s", 2644 - "Failed to initialize GPIOs. The serial port may not work as expected"); 2638 + if (ret < 0) { 2639 + dev_err(&pdev->dev, "Failed to initialize GPIOs."); 2640 + goto err; 2641 + } 2645 2642 2646 2643 ret = atmel_init_port(port, pdev); 2647 2644 if (ret)
+2 -2
drivers/tty/serial/bcm63xx_uart.c
··· 854 854 855 855 ret = uart_add_one_port(&bcm_uart_driver, port); 856 856 if (ret) { 857 - ports[pdev->id].membase = 0; 857 + ports[pdev->id].membase = NULL; 858 858 return ret; 859 859 } 860 860 platform_set_drvdata(pdev, port); ··· 868 868 port = platform_get_drvdata(pdev); 869 869 uart_remove_one_port(&bcm_uart_driver, port); 870 870 /* mark port as free */ 871 - ports[pdev->id].membase = 0; 871 + ports[pdev->id].membase = NULL; 872 872 return 0; 873 873 } 874 874
+2
drivers/tty/serial/clps711x.c
··· 501 501 platform_set_drvdata(pdev, s); 502 502 503 503 s->gpios = mctrl_gpio_init(&pdev->dev, 0); 504 + if (IS_ERR(s->gpios)) 505 + return PTR_ERR(s->gpios); 504 506 505 507 ret = uart_add_one_port(&clps711x_uart, &s->port); 506 508 if (ret)
+13 -26
drivers/tty/serial/earlycon.c
··· 54 54 return base; 55 55 } 56 56 57 - static int __init parse_options(struct earlycon_device *device, 58 - char *options) 57 + static int __init parse_options(struct earlycon_device *device, char *options) 59 58 { 60 59 struct uart_port *port = &device->port; 61 - int mmio, mmio32, length; 60 + int length; 62 61 unsigned long addr; 63 62 64 - if (!options) 65 - return -ENODEV; 63 + if (uart_parse_earlycon(options, &port->iotype, &addr, &options)) 64 + return -EINVAL; 66 65 67 - mmio = !strncmp(options, "mmio,", 5); 68 - mmio32 = !strncmp(options, "mmio32,", 7); 69 - if (mmio || mmio32) { 70 - port->iotype = (mmio ? UPIO_MEM : UPIO_MEM32); 71 - options += mmio ? 5 : 7; 72 - addr = simple_strtoul(options, NULL, 0); 66 + switch (port->iotype) { 67 + case UPIO_MEM32: 68 + port->regshift = 2; /* fall-through */ 69 + case UPIO_MEM: 73 70 port->mapbase = addr; 74 - if (mmio32) 75 - port->regshift = 2; 76 - } else if (!strncmp(options, "io,", 3)) { 77 - port->iotype = UPIO_PORT; 78 - options += 3; 79 - addr = simple_strtoul(options, NULL, 0); 71 + break; 72 + case UPIO_PORT: 80 73 port->iobase = addr; 81 - mmio = 0; 82 - } else if (!strncmp(options, "0x", 2)) { 83 - port->iotype = UPIO_MEM; 84 - addr = simple_strtoul(options, NULL, 0); 85 - port->mapbase = addr; 86 - } else { 74 + break; 75 + default: 87 76 return -EINVAL; 88 77 } 89 78 90 79 port->uartclk = BASE_BAUD * 16; 91 80 92 - options = strchr(options, ','); 93 81 if (options) { 94 - options++; 95 82 device->baud = simple_strtoul(options, NULL, 0); 96 83 length = min(strcspn(options, " ") + 1, 97 84 (size_t)(sizeof(device->options))); ··· 87 100 88 101 if (port->iotype == UPIO_MEM || port->iotype == UPIO_MEM32) 89 102 pr_info("Early serial console at MMIO%s 0x%llx (options '%s')\n", 90 - mmio32 ? "32" : "", 103 + (port->iotype == UPIO_MEM32) ? "32" : "", 91 104 (unsigned long long)port->mapbase, 92 105 device->options); 93 106 else
+121 -196
drivers/tty/serial/imx.c
··· 1 1 /* 2 - * Driver for Motorola IMX serial ports 2 + * Driver for Motorola/Freescale IMX serial ports 3 3 * 4 - * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. 4 + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. 5 5 * 6 - * Author: Sascha Hauer <sascha@saschahauer.de> 7 - * Copyright (C) 2004 Pengutronix 8 - * 9 - * Copyright (C) 2009 emlix GmbH 10 - * Author: Fabian Godehardt (added IrDA support for iMX) 6 + * Author: Sascha Hauer <sascha@saschahauer.de> 7 + * Copyright (C) 2004 Pengutronix 11 8 * 12 9 * This program is free software; you can redistribute it and/or modify 13 10 * it under the terms of the GNU General Public License as published by ··· 15 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 20 * GNU General Public License for more details. 18 - * 19 - * You should have received a copy of the GNU General Public License 20 - * along with this program; if not, write to the Free Software 21 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 - * 23 - * [29-Mar-2005] Mike Lee 24 - * Added hardware handshake 25 21 */ 26 22 27 23 #if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) ··· 179 189 180 190 #define UART_NR 8 181 191 182 - /* i.mx21 type uart runs on all i.mx except i.mx1 */ 192 + /* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */ 183 193 enum imx_uart_type { 184 194 IMX1_UART, 185 195 IMX21_UART, ··· 196 206 struct uart_port port; 197 207 struct timer_list timer; 198 208 unsigned int old_status; 199 - int txirq, rxirq, rtsirq; 200 209 unsigned int have_rtscts:1; 201 210 unsigned int dte_mode:1; 202 - unsigned int use_irda:1; 203 211 unsigned int irda_inv_rx:1; 204 212 unsigned int irda_inv_tx:1; 205 213 unsigned short trcv_delay; /* transceiver delay */ ··· 223 235 unsigned int ucr2; 224 236 unsigned int ucr3; 225 237 }; 226 - 227 - #ifdef CONFIG_IRDA 228 - 
#define USE_IRDA(sport) ((sport)->use_irda) 229 - #else 230 - #define USE_IRDA(sport) (0) 231 - #endif 232 238 233 239 static struct imx_uart_data imx_uart_devdata[] = { 234 240 [IMX1_UART] = { ··· 255 273 }; 256 274 MODULE_DEVICE_TABLE(platform, imx_uart_devtype); 257 275 258 - static struct of_device_id imx_uart_dt_ids[] = { 276 + static const struct of_device_id imx_uart_dt_ids[] = { 259 277 { .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], }, 260 278 { .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], }, 261 279 { .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], }, ··· 358 376 struct imx_port *sport = (struct imx_port *)port; 359 377 unsigned long temp; 360 378 361 - if (USE_IRDA(sport)) { 362 - /* half duplex - wait for end of transmission */ 363 - int n = 256; 364 - while ((--n > 0) && 365 - !(readl(sport->port.membase + USR2) & USR2_TXDC)) { 366 - udelay(5); 367 - barrier(); 368 - } 369 - /* 370 - * irda transceiver - wait a bit more to avoid 371 - * cutoff, hardware dependent 372 - */ 373 - udelay(sport->trcv_delay); 374 - 375 - /* 376 - * half duplex - reactivate receive mode, 377 - * flush receive pipe echo crap 378 - */ 379 - if (readl(sport->port.membase + USR2) & USR2_TXDC) { 380 - temp = readl(sport->port.membase + UCR1); 381 - temp &= ~(UCR1_TXMPTYEN | UCR1_TRDYEN); 382 - writel(temp, sport->port.membase + UCR1); 383 - 384 - temp = readl(sport->port.membase + UCR4); 385 - temp &= ~(UCR4_TCEN); 386 - writel(temp, sport->port.membase + UCR4); 387 - 388 - while (readl(sport->port.membase + URXD0) & 389 - URXD_CHARRDY) 390 - barrier(); 391 - 392 - temp = readl(sport->port.membase + UCR1); 393 - temp |= UCR1_RRDYEN; 394 - writel(temp, sport->port.membase + UCR1); 395 - 396 - temp = readl(sport->port.membase + UCR4); 397 - temp |= UCR4_DREN; 398 - writel(temp, sport->port.membase + UCR4); 399 - } 400 - return; 401 - } 402 - 403 379 /* 404 380 * We are maybe in the SMP context, so if the DMA TX 
thread is running 405 381 * on other cpu, we have to wait for it to finish. ··· 365 425 if (sport->dma_is_enabled && sport->dma_is_txing) 366 426 return; 367 427 368 - temp = readl(sport->port.membase + UCR1); 369 - writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1); 428 + temp = readl(port->membase + UCR1); 429 + writel(temp & ~UCR1_TXMPTYEN, port->membase + UCR1); 430 + 431 + /* in rs485 mode disable transmitter if shifter is empty */ 432 + if (port->rs485.flags & SER_RS485_ENABLED && 433 + readl(port->membase + USR2) & USR2_TXDC) { 434 + temp = readl(port->membase + UCR2); 435 + if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) 436 + temp &= ~UCR2_CTS; 437 + else 438 + temp |= UCR2_CTS; 439 + writel(temp, port->membase + UCR2); 440 + 441 + temp = readl(port->membase + UCR4); 442 + temp &= ~UCR4_TCEN; 443 + writel(temp, port->membase + UCR4); 444 + } 370 445 } 371 446 372 447 /* ··· 575 620 struct imx_port *sport = (struct imx_port *)port; 576 621 unsigned long temp; 577 622 578 - if (USE_IRDA(sport)) { 579 - /* half duplex in IrDA mode; have to disable receive mode */ 580 - temp = readl(sport->port.membase + UCR4); 581 - temp &= ~(UCR4_DREN); 582 - writel(temp, sport->port.membase + UCR4); 623 + if (port->rs485.flags & SER_RS485_ENABLED) { 624 + /* enable transmitter and shifter empty irq */ 625 + temp = readl(port->membase + UCR2); 626 + if (port->rs485.flags & SER_RS485_RTS_ON_SEND) 627 + temp &= ~UCR2_CTS; 628 + else 629 + temp |= UCR2_CTS; 630 + writel(temp, port->membase + UCR2); 583 631 584 - temp = readl(sport->port.membase + UCR1); 585 - temp &= ~(UCR1_RRDYEN); 586 - writel(temp, sport->port.membase + UCR1); 632 + temp = readl(port->membase + UCR4); 633 + temp |= UCR4_TCEN; 634 + writel(temp, port->membase + UCR4); 587 635 } 588 636 589 637 if (!sport->dma_is_enabled) { 590 638 temp = readl(sport->port.membase + UCR1); 591 639 writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1); 592 - } 593 - 594 - if (USE_IRDA(sport)) { 595 - temp = 
readl(sport->port.membase + UCR1); 596 - temp |= UCR1_TRDYEN; 597 - writel(temp, sport->port.membase + UCR1); 598 - 599 - temp = readl(sport->port.membase + UCR4); 600 - temp |= UCR4_TCEN; 601 - writel(temp, sport->port.membase + UCR4); 602 640 } 603 641 604 642 if (sport->dma_is_enabled) { ··· 744 796 unsigned int sts2; 745 797 746 798 sts = readl(sport->port.membase + USR1); 799 + sts2 = readl(sport->port.membase + USR2); 747 800 748 801 if (sts & USR1_RRDY) { 749 802 if (sport->dma_is_enabled) ··· 753 804 imx_rxint(irq, dev_id); 754 805 } 755 806 756 - if (sts & USR1_TRDY && 757 - readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN) 807 + if ((sts & USR1_TRDY && 808 + readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN) || 809 + (sts2 & USR2_TXDC && 810 + readl(sport->port.membase + UCR4) & UCR4_TCEN)) 758 811 imx_txint(irq, dev_id); 759 812 760 813 if (sts & USR1_RTSD) ··· 765 814 if (sts & USR1_AWAKE) 766 815 writel(USR1_AWAKE, sport->port.membase + USR1); 767 816 768 - sts2 = readl(sport->port.membase + USR2); 769 817 if (sts2 & USR2_ORE) { 770 818 dev_err(sport->port.dev, "Rx FIFO overrun\n"); 771 819 sport->port.icount.overrun++; 772 - writel(sts2 | USR2_ORE, sport->port.membase + USR2); 820 + writel(USR2_ORE, sport->port.membase + USR2); 773 821 } 774 822 775 823 return IRQ_HANDLED; ··· 816 866 struct imx_port *sport = (struct imx_port *)port; 817 867 unsigned long temp; 818 868 819 - temp = readl(sport->port.membase + UCR2) & ~(UCR2_CTS | UCR2_CTSC); 820 - if (mctrl & TIOCM_RTS) 821 - temp |= UCR2_CTS | UCR2_CTSC; 822 - 823 - writel(temp, sport->port.membase + UCR2); 869 + if (!(port->rs485.flags & SER_RS485_ENABLED)) { 870 + temp = readl(sport->port.membase + UCR2); 871 + temp &= ~(UCR2_CTS | UCR2_CTSC); 872 + if (mctrl & TIOCM_RTS) 873 + temp |= UCR2_CTS | UCR2_CTSC; 874 + writel(temp, sport->port.membase + UCR2); 875 + } 824 876 825 877 temp = readl(sport->port.membase + uts_reg(sport)) & ~UTS_LOOP; 826 878 if (mctrl & TIOCM_LOOP) ··· 1108 1156 */ 1109 
1157 temp = readl(sport->port.membase + UCR4); 1110 1158 1111 - if (USE_IRDA(sport)) 1112 - temp |= UCR4_IRSC; 1113 - 1114 1159 /* set the trigger level for CTS */ 1115 1160 temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF); 1116 1161 temp |= CTSTL << UCR4_CTSTL_SHF; ··· 1130 1181 imx_uart_dma_init(sport); 1131 1182 1132 1183 spin_lock_irqsave(&sport->port.lock, flags); 1184 + 1133 1185 /* 1134 1186 * Finally, clear and enable interrupts 1135 1187 */ 1136 1188 writel(USR1_RTSD, sport->port.membase + USR1); 1189 + writel(USR2_ORE, sport->port.membase + USR2); 1137 1190 1138 1191 if (sport->dma_is_inited && !sport->dma_is_enabled) 1139 1192 imx_enable_dma(sport); ··· 1143 1192 temp = readl(sport->port.membase + UCR1); 1144 1193 temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; 1145 1194 1146 - if (USE_IRDA(sport)) { 1147 - temp |= UCR1_IREN; 1148 - temp &= ~(UCR1_RTSDEN); 1149 - } 1150 - 1151 1195 writel(temp, sport->port.membase + UCR1); 1152 - 1153 - /* Clear any pending ORE flag before enabling interrupt */ 1154 - temp = readl(sport->port.membase + USR2); 1155 - writel(temp | USR2_ORE, sport->port.membase + USR2); 1156 1196 1157 1197 temp = readl(sport->port.membase + UCR4); 1158 1198 temp |= UCR4_OREN; ··· 1161 1219 writel(temp, sport->port.membase + UCR3); 1162 1220 } 1163 1221 1164 - if (USE_IRDA(sport)) { 1165 - temp = readl(sport->port.membase + UCR4); 1166 - if (sport->irda_inv_rx) 1167 - temp |= UCR4_INVR; 1168 - else 1169 - temp &= ~(UCR4_INVR); 1170 - writel(temp | UCR4_DREN, sport->port.membase + UCR4); 1171 - 1172 - temp = readl(sport->port.membase + UCR3); 1173 - if (sport->irda_inv_tx) 1174 - temp |= UCR3_INVT; 1175 - else 1176 - temp &= ~(UCR3_INVT); 1177 - writel(temp, sport->port.membase + UCR3); 1178 - } 1179 - 1180 1222 /* 1181 1223 * Enable modem status interrupts 1182 1224 */ 1183 1225 imx_enable_ms(&sport->port); 1184 1226 spin_unlock_irqrestore(&sport->port.lock, flags); 1185 - 1186 - if (USE_IRDA(sport)) { 1187 - struct imxuart_platform_data 
*pdata; 1188 - pdata = dev_get_platdata(sport->port.dev); 1189 - sport->irda_inv_rx = pdata->irda_inv_rx; 1190 - sport->irda_inv_tx = pdata->irda_inv_tx; 1191 - sport->trcv_delay = pdata->transceiver_delay; 1192 - if (pdata->irda_enable) 1193 - pdata->irda_enable(1); 1194 - } 1195 1227 1196 1228 return 0; 1197 1229 } ··· 1202 1286 writel(temp, sport->port.membase + UCR2); 1203 1287 spin_unlock_irqrestore(&sport->port.lock, flags); 1204 1288 1205 - if (USE_IRDA(sport)) { 1206 - struct imxuart_platform_data *pdata; 1207 - pdata = dev_get_platdata(sport->port.dev); 1208 - if (pdata->irda_enable) 1209 - pdata->irda_enable(0); 1210 - } 1211 - 1212 1289 /* 1213 1290 * Stop our timer. 1214 1291 */ ··· 1214 1305 spin_lock_irqsave(&sport->port.lock, flags); 1215 1306 temp = readl(sport->port.membase + UCR1); 1216 1307 temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN); 1217 - if (USE_IRDA(sport)) 1218 - temp &= ~(UCR1_IREN); 1219 1308 1220 1309 writel(temp, sport->port.membase + UCR1); 1221 1310 spin_unlock_irqrestore(&sport->port.lock, flags); ··· 1227 1320 struct imx_port *sport = (struct imx_port *)port; 1228 1321 struct scatterlist *sgl = &sport->tx_sgl[0]; 1229 1322 unsigned long temp; 1230 - int i = 100, ubir, ubmr, ubrc, uts; 1323 + int i = 100, ubir, ubmr, uts; 1231 1324 1232 1325 if (!sport->dma_chan_tx) 1233 1326 return; ··· 1252 1345 */ 1253 1346 ubir = readl(sport->port.membase + UBIR); 1254 1347 ubmr = readl(sport->port.membase + UBMR); 1255 - ubrc = readl(sport->port.membase + UBRC); 1256 1348 uts = readl(sport->port.membase + IMX21_UTS); 1257 1349 1258 1350 temp = readl(sport->port.membase + UCR2); ··· 1264 1358 /* Restore the registers */ 1265 1359 writel(ubir, sport->port.membase + UBIR); 1266 1360 writel(ubmr, sport->port.membase + UBMR); 1267 - writel(ubrc, sport->port.membase + UBRC); 1268 1361 writel(uts, sport->port.membase + IMX21_UTS); 1269 1362 } 1270 1363 ··· 1278 1373 unsigned int div, ufcr; 1279 1374 unsigned long num, denom; 1280 
1375 uint64_t tdiv64; 1281 - 1282 - /* 1283 - * If we don't support modem control lines, don't allow 1284 - * these to be set. 1285 - */ 1286 - if (0) { 1287 - termios->c_cflag &= ~(HUPCL | CRTSCTS | CMSPAR); 1288 - termios->c_cflag |= CLOCAL; 1289 - } 1290 1376 1291 1377 /* 1292 1378 * We only support CS7 and CS8. ··· 1297 1401 if (termios->c_cflag & CRTSCTS) { 1298 1402 if (sport->have_rtscts) { 1299 1403 ucr2 &= ~UCR2_IRTS; 1300 - ucr2 |= UCR2_CTSC; 1404 + 1405 + if (port->rs485.flags & SER_RS485_ENABLED) 1406 + /* 1407 + * RTS is mandatory for rs485 operation, so keep 1408 + * it under manual control and keep transmitter 1409 + * disabled. 1410 + */ 1411 + if (!(port->rs485.flags & 1412 + SER_RS485_RTS_AFTER_SEND)) 1413 + ucr2 |= UCR2_CTS; 1414 + else 1415 + ucr2 |= UCR2_CTSC; 1416 + 1301 1417 } else { 1302 1418 termios->c_cflag &= ~CRTSCTS; 1303 1419 } 1304 - } 1420 + } else if (port->rs485.flags & SER_RS485_ENABLED) 1421 + /* disable transmitter */ 1422 + if (!(port->rs485.flags & SER_RS485_RTS_AFTER_SEND)) 1423 + ucr2 |= UCR2_CTS; 1305 1424 1306 1425 if (termios->c_cflag & CSTOPB) 1307 1426 ucr2 |= UCR2_STPB; ··· 1382 1471 sport->port.membase + UCR2); 1383 1472 old_txrxen &= (UCR2_TXEN | UCR2_RXEN); 1384 1473 1385 - if (USE_IRDA(sport)) { 1386 - /* 1387 - * use maximum available submodule frequency to 1388 - * avoid missing short pulses due to low sampling rate 1389 - */ 1390 - div = 1; 1391 - } else { 1392 - /* custom-baudrate handling */ 1393 - div = sport->port.uartclk / (baud * 16); 1394 - if (baud == 38400 && quot != div) 1395 - baud = sport->port.uartclk / (quot * 16); 1474 + /* custom-baudrate handling */ 1475 + div = sport->port.uartclk / (baud * 16); 1476 + if (baud == 38400 && quot != div) 1477 + baud = sport->port.uartclk / (quot * 16); 1396 1478 1397 - div = sport->port.uartclk / (baud * 16); 1398 - if (div > 7) 1399 - div = 7; 1400 - if (!div) 1401 - div = 1; 1402 - } 1479 + div = sport->port.uartclk / (baud * 16); 1480 + if (div > 7) 1481 + div 
= 7; 1482 + if (!div) 1483 + div = 1; 1403 1484 1404 1485 rational_best_approximation(16 * div * baud, sport->port.uartclk, 1405 1486 1 << 16, 1 << 16, &num, &denom); ··· 1537 1634 } while (~status & USR2_TXDC); 1538 1635 } 1539 1636 #endif 1637 + 1638 + static int imx_rs485_config(struct uart_port *port, 1639 + struct serial_rs485 *rs485conf) 1640 + { 1641 + struct imx_port *sport = (struct imx_port *)port; 1642 + 1643 + /* unimplemented */ 1644 + rs485conf->delay_rts_before_send = 0; 1645 + rs485conf->delay_rts_after_send = 0; 1646 + rs485conf->flags |= SER_RS485_RX_DURING_TX; 1647 + 1648 + /* RTS is required to control the transmitter */ 1649 + if (!sport->have_rtscts) 1650 + rs485conf->flags &= ~SER_RS485_ENABLED; 1651 + 1652 + if (rs485conf->flags & SER_RS485_ENABLED) { 1653 + unsigned long temp; 1654 + 1655 + /* disable transmitter */ 1656 + temp = readl(sport->port.membase + UCR2); 1657 + temp &= ~UCR2_CTSC; 1658 + if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) 1659 + temp &= ~UCR2_CTS; 1660 + else 1661 + temp |= UCR2_CTS; 1662 + writel(temp, sport->port.membase + UCR2); 1663 + } 1664 + 1665 + port->rs485 = *rs485conf; 1666 + 1667 + return 0; 1668 + } 1540 1669 1541 1670 static struct uart_ops imx_pops = { 1542 1671 .tx_empty = imx_tx_empty, ··· 1862 1927 if (of_get_property(np, "fsl,uart-has-rtscts", NULL)) 1863 1928 sport->have_rtscts = 1; 1864 1929 1865 - if (of_get_property(np, "fsl,irda-mode", NULL)) 1866 - sport->use_irda = 1; 1867 - 1868 1930 if (of_get_property(np, "fsl,dte-mode", NULL)) 1869 1931 sport->dte_mode = 1; 1870 1932 ··· 1890 1958 1891 1959 if (pdata->flags & IMXUART_HAVE_RTSCTS) 1892 1960 sport->have_rtscts = 1; 1893 - 1894 - if (pdata->flags & IMXUART_IRDA) 1895 - sport->use_irda = 1; 1896 1961 } 1897 1962 1898 1963 static int serial_imx_probe(struct platform_device *pdev) ··· 1898 1969 void __iomem *base; 1899 1970 int ret = 0; 1900 1971 struct resource *res; 1972 + int txirq, rxirq, rtsirq; 1901 1973 1902 1974 sport = 
devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL); 1903 1975 if (!sport) ··· 1915 1985 if (IS_ERR(base)) 1916 1986 return PTR_ERR(base); 1917 1987 1988 + rxirq = platform_get_irq(pdev, 0); 1989 + txirq = platform_get_irq(pdev, 1); 1990 + rtsirq = platform_get_irq(pdev, 2); 1991 + 1918 1992 sport->port.dev = &pdev->dev; 1919 1993 sport->port.mapbase = res->start; 1920 1994 sport->port.membase = base; 1921 1995 sport->port.type = PORT_IMX, 1922 1996 sport->port.iotype = UPIO_MEM; 1923 - sport->port.irq = platform_get_irq(pdev, 0); 1924 - sport->rxirq = platform_get_irq(pdev, 0); 1925 - sport->txirq = platform_get_irq(pdev, 1); 1926 - sport->rtsirq = platform_get_irq(pdev, 2); 1997 + sport->port.irq = rxirq; 1927 1998 sport->port.fifosize = 32; 1928 1999 sport->port.ops = &imx_pops; 2000 + sport->port.rs485_config = imx_rs485_config; 2001 + sport->port.rs485.flags = 2002 + SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX; 1929 2003 sport->port.flags = UPF_BOOT_AUTOCONF; 1930 2004 init_timer(&sport->timer); 1931 2005 sport->timer.function = imx_timeout; ··· 1955 2021 * Allocate the IRQ(s) i.MX1 has three interrupts whereas later 1956 2022 * chips only have one interrupt. 
1957 2023 */ 1958 - if (sport->txirq > 0) { 1959 - ret = devm_request_irq(&pdev->dev, sport->rxirq, imx_rxint, 0, 2024 + if (txirq > 0) { 2025 + ret = devm_request_irq(&pdev->dev, rxirq, imx_rxint, 0, 1960 2026 dev_name(&pdev->dev), sport); 1961 2027 if (ret) 1962 2028 return ret; 1963 2029 1964 - ret = devm_request_irq(&pdev->dev, sport->txirq, imx_txint, 0, 2030 + ret = devm_request_irq(&pdev->dev, txirq, imx_txint, 0, 1965 2031 dev_name(&pdev->dev), sport); 1966 2032 if (ret) 1967 2033 return ret; 1968 - 1969 - /* do not use RTS IRQ on IrDA */ 1970 - if (!USE_IRDA(sport)) { 1971 - ret = devm_request_irq(&pdev->dev, sport->rtsirq, 1972 - imx_rtsint, 0, 1973 - dev_name(&pdev->dev), sport); 1974 - if (ret) 1975 - return ret; 1976 - } 1977 2034 } else { 1978 - ret = devm_request_irq(&pdev->dev, sport->port.irq, imx_int, 0, 2035 + ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0, 1979 2036 dev_name(&pdev->dev), sport); 1980 2037 if (ret) 1981 2038 return ret;
+1 -1
drivers/tty/serial/max3100.c
··· 782 782 pdata = dev_get_platdata(&spi->dev); 783 783 max3100s[i]->crystal = pdata->crystal; 784 784 max3100s[i]->loopback = pdata->loopback; 785 - max3100s[i]->poll_time = pdata->poll_time * HZ / 1000; 785 + max3100s[i]->poll_time = msecs_to_jiffies(pdata->poll_time); 786 786 if (pdata->poll_time > 0 && max3100s[i]->poll_time == 0) 787 787 max3100s[i]->poll_time = 1; 788 788 max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend;
-1505
drivers/tty/serial/mfd.c
··· 1 - /* 2 - * mfd.c: driver for High Speed UART device of Intel Medfield platform 3 - * 4 - * Refer pxa.c, 8250.c and some other drivers in drivers/serial/ 5 - * 6 - * (C) Copyright 2010 Intel Corporation 7 - * 8 - * This program is free software; you can redistribute it and/or 9 - * modify it under the terms of the GNU General Public License 10 - * as published by the Free Software Foundation; version 2 11 - * of the License. 12 - */ 13 - 14 - /* Notes: 15 - * 1. DMA channel allocation: 0/1 channel are assigned to port 0, 16 - * 2/3 chan to port 1, 4/5 chan to port 3. Even number chans 17 - * are used for RX, odd chans for TX 18 - * 19 - * 2. The RI/DSR/DCD/DTR are not pinned out, DCD & DSR are always 20 - * asserted, only when the HW is reset the DDCD and DDSR will 21 - * be triggered 22 - */ 23 - 24 - #if defined(CONFIG_SERIAL_MFD_HSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 25 - #define SUPPORT_SYSRQ 26 - #endif 27 - 28 - #include <linux/module.h> 29 - #include <linux/init.h> 30 - #include <linux/console.h> 31 - #include <linux/sysrq.h> 32 - #include <linux/slab.h> 33 - #include <linux/serial_reg.h> 34 - #include <linux/circ_buf.h> 35 - #include <linux/delay.h> 36 - #include <linux/interrupt.h> 37 - #include <linux/tty.h> 38 - #include <linux/tty_flip.h> 39 - #include <linux/serial_core.h> 40 - #include <linux/serial_mfd.h> 41 - #include <linux/dma-mapping.h> 42 - #include <linux/pci.h> 43 - #include <linux/nmi.h> 44 - #include <linux/io.h> 45 - #include <linux/debugfs.h> 46 - #include <linux/pm_runtime.h> 47 - 48 - #define HSU_DMA_BUF_SIZE 2048 49 - 50 - #define chan_readl(chan, offset) readl(chan->reg + offset) 51 - #define chan_writel(chan, offset, val) writel(val, chan->reg + offset) 52 - 53 - #define mfd_readl(obj, offset) readl(obj->reg + offset) 54 - #define mfd_writel(obj, offset, val) writel(val, obj->reg + offset) 55 - 56 - static int hsu_dma_enable; 57 - module_param(hsu_dma_enable, int, 0); 58 - MODULE_PARM_DESC(hsu_dma_enable, 59 - "It is a 
bitmap to set working mode, if bit[x] is 1, then port[x] will work in DMA mode, otherwise in PIO mode."); 60 - 61 - struct hsu_dma_buffer { 62 - u8 *buf; 63 - dma_addr_t dma_addr; 64 - u32 dma_size; 65 - u32 ofs; 66 - }; 67 - 68 - struct hsu_dma_chan { 69 - u32 id; 70 - enum dma_data_direction dirt; 71 - struct uart_hsu_port *uport; 72 - void __iomem *reg; 73 - }; 74 - 75 - struct uart_hsu_port { 76 - struct uart_port port; 77 - unsigned char ier; 78 - unsigned char lcr; 79 - unsigned char mcr; 80 - unsigned int lsr_break_flag; 81 - char name[12]; 82 - int index; 83 - struct device *dev; 84 - 85 - struct hsu_dma_chan *txc; 86 - struct hsu_dma_chan *rxc; 87 - struct hsu_dma_buffer txbuf; 88 - struct hsu_dma_buffer rxbuf; 89 - int use_dma; /* flag for DMA/PIO */ 90 - int running; 91 - int dma_tx_on; 92 - }; 93 - 94 - /* Top level data structure of HSU */ 95 - struct hsu_port { 96 - void __iomem *reg; 97 - unsigned long paddr; 98 - unsigned long iolen; 99 - u32 irq; 100 - 101 - struct uart_hsu_port port[3]; 102 - struct hsu_dma_chan chans[10]; 103 - 104 - struct dentry *debugfs; 105 - }; 106 - 107 - static inline unsigned int serial_in(struct uart_hsu_port *up, int offset) 108 - { 109 - unsigned int val; 110 - 111 - if (offset > UART_MSR) { 112 - offset <<= 2; 113 - val = readl(up->port.membase + offset); 114 - } else 115 - val = (unsigned int)readb(up->port.membase + offset); 116 - 117 - return val; 118 - } 119 - 120 - static inline void serial_out(struct uart_hsu_port *up, int offset, int value) 121 - { 122 - if (offset > UART_MSR) { 123 - offset <<= 2; 124 - writel(value, up->port.membase + offset); 125 - } else { 126 - unsigned char val = value & 0xff; 127 - writeb(val, up->port.membase + offset); 128 - } 129 - } 130 - 131 - #ifdef CONFIG_DEBUG_FS 132 - 133 - #define HSU_REGS_BUFSIZE 1024 134 - 135 - 136 - static ssize_t port_show_regs(struct file *file, char __user *user_buf, 137 - size_t count, loff_t *ppos) 138 - { 139 - struct uart_hsu_port *up = 
file->private_data; 140 - char *buf; 141 - u32 len = 0; 142 - ssize_t ret; 143 - 144 - buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL); 145 - if (!buf) 146 - return 0; 147 - 148 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 149 - "MFD HSU port[%d] regs:\n", up->index); 150 - 151 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 152 - "=================================\n"); 153 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 154 - "IER: \t\t0x%08x\n", serial_in(up, UART_IER)); 155 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 156 - "IIR: \t\t0x%08x\n", serial_in(up, UART_IIR)); 157 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 158 - "LCR: \t\t0x%08x\n", serial_in(up, UART_LCR)); 159 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 160 - "MCR: \t\t0x%08x\n", serial_in(up, UART_MCR)); 161 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 162 - "LSR: \t\t0x%08x\n", serial_in(up, UART_LSR)); 163 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 164 - "MSR: \t\t0x%08x\n", serial_in(up, UART_MSR)); 165 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 166 - "FOR: \t\t0x%08x\n", serial_in(up, UART_FOR)); 167 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 168 - "PS: \t\t0x%08x\n", serial_in(up, UART_PS)); 169 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 170 - "MUL: \t\t0x%08x\n", serial_in(up, UART_MUL)); 171 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 172 - "DIV: \t\t0x%08x\n", serial_in(up, UART_DIV)); 173 - 174 - if (len > HSU_REGS_BUFSIZE) 175 - len = HSU_REGS_BUFSIZE; 176 - 177 - ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); 178 - kfree(buf); 179 - return ret; 180 - } 181 - 182 - static ssize_t dma_show_regs(struct file *file, char __user *user_buf, 183 - size_t count, loff_t *ppos) 184 - { 185 - struct hsu_dma_chan *chan = file->private_data; 186 - char *buf; 187 - u32 len = 0; 188 - ssize_t ret; 189 - 190 - buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL); 191 - if (!buf) 192 - return 0; 193 - 
194 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 195 - "MFD HSU DMA channel [%d] regs:\n", chan->id); 196 - 197 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 198 - "=================================\n"); 199 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 200 - "CR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_CR)); 201 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 202 - "DCR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_DCR)); 203 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 204 - "BSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_BSR)); 205 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 206 - "MOTSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_MOTSR)); 207 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 208 - "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0SAR)); 209 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 210 - "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0TSR)); 211 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 212 - "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1SAR)); 213 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 214 - "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1TSR)); 215 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 216 - "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2SAR)); 217 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 218 - "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2TSR)); 219 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 220 - "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3SAR)); 221 - len += snprintf(buf + len, HSU_REGS_BUFSIZE - len, 222 - "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR)); 223 - 224 - if (len > HSU_REGS_BUFSIZE) 225 - len = HSU_REGS_BUFSIZE; 226 - 227 - ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); 228 - kfree(buf); 229 - return ret; 230 - } 231 - 232 - static const struct file_operations port_regs_ops = { 233 - .owner = THIS_MODULE, 234 - .open = simple_open, 235 - .read = port_show_regs, 236 - .llseek = 
default_llseek, 237 - }; 238 - 239 - static const struct file_operations dma_regs_ops = { 240 - .owner = THIS_MODULE, 241 - .open = simple_open, 242 - .read = dma_show_regs, 243 - .llseek = default_llseek, 244 - }; 245 - 246 - static int hsu_debugfs_init(struct hsu_port *hsu) 247 - { 248 - int i; 249 - char name[32]; 250 - 251 - hsu->debugfs = debugfs_create_dir("hsu", NULL); 252 - if (!hsu->debugfs) 253 - return -ENOMEM; 254 - 255 - for (i = 0; i < 3; i++) { 256 - snprintf(name, sizeof(name), "port_%d_regs", i); 257 - debugfs_create_file(name, S_IFREG | S_IRUGO, 258 - hsu->debugfs, (void *)(&hsu->port[i]), &port_regs_ops); 259 - } 260 - 261 - for (i = 0; i < 6; i++) { 262 - snprintf(name, sizeof(name), "dma_chan_%d_regs", i); 263 - debugfs_create_file(name, S_IFREG | S_IRUGO, 264 - hsu->debugfs, (void *)&hsu->chans[i], &dma_regs_ops); 265 - } 266 - 267 - return 0; 268 - } 269 - 270 - static void hsu_debugfs_remove(struct hsu_port *hsu) 271 - { 272 - if (hsu->debugfs) 273 - debugfs_remove_recursive(hsu->debugfs); 274 - } 275 - 276 - #else 277 - static inline int hsu_debugfs_init(struct hsu_port *hsu) 278 - { 279 - return 0; 280 - } 281 - 282 - static inline void hsu_debugfs_remove(struct hsu_port *hsu) 283 - { 284 - } 285 - #endif /* CONFIG_DEBUG_FS */ 286 - 287 - static void serial_hsu_enable_ms(struct uart_port *port) 288 - { 289 - struct uart_hsu_port *up = 290 - container_of(port, struct uart_hsu_port, port); 291 - 292 - up->ier |= UART_IER_MSI; 293 - serial_out(up, UART_IER, up->ier); 294 - } 295 - 296 - static void hsu_dma_tx(struct uart_hsu_port *up) 297 - { 298 - struct circ_buf *xmit = &up->port.state->xmit; 299 - struct hsu_dma_buffer *dbuf = &up->txbuf; 300 - int count; 301 - 302 - /* test_and_set_bit may be better, but anyway it's in lock protected mode */ 303 - if (up->dma_tx_on) 304 - return; 305 - 306 - /* Update the circ buf info */ 307 - xmit->tail += dbuf->ofs; 308 - xmit->tail &= UART_XMIT_SIZE - 1; 309 - 310 - up->port.icount.tx += dbuf->ofs; 
311 - dbuf->ofs = 0; 312 - 313 - /* Disable the channel */ 314 - chan_writel(up->txc, HSU_CH_CR, 0x0); 315 - 316 - if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) { 317 - dma_sync_single_for_device(up->port.dev, 318 - dbuf->dma_addr, 319 - dbuf->dma_size, 320 - DMA_TO_DEVICE); 321 - 322 - count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); 323 - dbuf->ofs = count; 324 - 325 - /* Reprogram the channel */ 326 - chan_writel(up->txc, HSU_CH_D0SAR, dbuf->dma_addr + xmit->tail); 327 - chan_writel(up->txc, HSU_CH_D0TSR, count); 328 - 329 - /* Reenable the channel */ 330 - chan_writel(up->txc, HSU_CH_DCR, 0x1 331 - | (0x1 << 8) 332 - | (0x1 << 16) 333 - | (0x1 << 24)); 334 - up->dma_tx_on = 1; 335 - chan_writel(up->txc, HSU_CH_CR, 0x1); 336 - } 337 - 338 - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 339 - uart_write_wakeup(&up->port); 340 - } 341 - 342 - /* The buffer is already cache coherent */ 343 - static void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc, 344 - struct hsu_dma_buffer *dbuf) 345 - { 346 - dbuf->ofs = 0; 347 - 348 - chan_writel(rxc, HSU_CH_BSR, 32); 349 - chan_writel(rxc, HSU_CH_MOTSR, 4); 350 - 351 - chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr); 352 - chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size); 353 - chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8) 354 - | (0x1 << 16) 355 - | (0x1 << 24) /* timeout bit, see HSU Errata 1 */ 356 - ); 357 - chan_writel(rxc, HSU_CH_CR, 0x3); 358 - } 359 - 360 - /* Protected by spin_lock_irqsave(port->lock) */ 361 - static void serial_hsu_start_tx(struct uart_port *port) 362 - { 363 - struct uart_hsu_port *up = 364 - container_of(port, struct uart_hsu_port, port); 365 - 366 - if (up->use_dma) { 367 - hsu_dma_tx(up); 368 - } else if (!(up->ier & UART_IER_THRI)) { 369 - up->ier |= UART_IER_THRI; 370 - serial_out(up, UART_IER, up->ier); 371 - } 372 - } 373 - 374 - static void serial_hsu_stop_tx(struct uart_port *port) 375 - { 376 - struct uart_hsu_port *up = 377 - container_of(port, struct 
uart_hsu_port, port); 378 - struct hsu_dma_chan *txc = up->txc; 379 - 380 - if (up->use_dma) 381 - chan_writel(txc, HSU_CH_CR, 0x0); 382 - else if (up->ier & UART_IER_THRI) { 383 - up->ier &= ~UART_IER_THRI; 384 - serial_out(up, UART_IER, up->ier); 385 - } 386 - } 387 - 388 - /* This is always called in spinlock protected mode, so 389 - * modify timeout timer is safe here */ 390 - static void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts, 391 - unsigned long *flags) 392 - { 393 - struct hsu_dma_buffer *dbuf = &up->rxbuf; 394 - struct hsu_dma_chan *chan = up->rxc; 395 - struct uart_port *port = &up->port; 396 - struct tty_port *tport = &port->state->port; 397 - int count; 398 - 399 - /* 400 - * First need to know how many is already transferred, 401 - * then check if its a timeout DMA irq, and return 402 - * the trail bytes out, push them up and reenable the 403 - * channel 404 - */ 405 - 406 - /* Timeout IRQ, need wait some time, see Errata 2 */ 407 - if (int_sts & 0xf00) 408 - udelay(2); 409 - 410 - /* Stop the channel */ 411 - chan_writel(chan, HSU_CH_CR, 0x0); 412 - 413 - count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr; 414 - if (!count) { 415 - /* Restart the channel before we leave */ 416 - chan_writel(chan, HSU_CH_CR, 0x3); 417 - return; 418 - } 419 - 420 - dma_sync_single_for_cpu(port->dev, dbuf->dma_addr, 421 - dbuf->dma_size, DMA_FROM_DEVICE); 422 - 423 - /* 424 - * Head will only wrap around when we recycle 425 - * the DMA buffer, and when that happens, we 426 - * explicitly set tail to 0. So head will 427 - * always be greater than tail. 
428 - */ 429 - tty_insert_flip_string(tport, dbuf->buf, count); 430 - port->icount.rx += count; 431 - 432 - dma_sync_single_for_device(up->port.dev, dbuf->dma_addr, 433 - dbuf->dma_size, DMA_FROM_DEVICE); 434 - 435 - /* Reprogram the channel */ 436 - chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr); 437 - chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size); 438 - chan_writel(chan, HSU_CH_DCR, 0x1 439 - | (0x1 << 8) 440 - | (0x1 << 16) 441 - | (0x1 << 24) /* timeout bit, see HSU Errata 1 */ 442 - ); 443 - spin_unlock_irqrestore(&up->port.lock, *flags); 444 - tty_flip_buffer_push(tport); 445 - spin_lock_irqsave(&up->port.lock, *flags); 446 - 447 - chan_writel(chan, HSU_CH_CR, 0x3); 448 - 449 - } 450 - 451 - static void serial_hsu_stop_rx(struct uart_port *port) 452 - { 453 - struct uart_hsu_port *up = 454 - container_of(port, struct uart_hsu_port, port); 455 - struct hsu_dma_chan *chan = up->rxc; 456 - 457 - if (up->use_dma) 458 - chan_writel(chan, HSU_CH_CR, 0x2); 459 - else { 460 - up->ier &= ~UART_IER_RLSI; 461 - up->port.read_status_mask &= ~UART_LSR_DR; 462 - serial_out(up, UART_IER, up->ier); 463 - } 464 - } 465 - 466 - static inline void receive_chars(struct uart_hsu_port *up, int *status, 467 - unsigned long *flags) 468 - { 469 - unsigned int ch, flag; 470 - unsigned int max_count = 256; 471 - 472 - do { 473 - ch = serial_in(up, UART_RX); 474 - flag = TTY_NORMAL; 475 - up->port.icount.rx++; 476 - 477 - if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE | 478 - UART_LSR_FE | UART_LSR_OE))) { 479 - 480 - dev_warn(up->dev, "We really rush into ERR/BI case" 481 - "status = 0x%02x", *status); 482 - /* For statistics only */ 483 - if (*status & UART_LSR_BI) { 484 - *status &= ~(UART_LSR_FE | UART_LSR_PE); 485 - up->port.icount.brk++; 486 - /* 487 - * We do the SysRQ and SAK checking 488 - * here because otherwise the break 489 - * may get masked by ignore_status_mask 490 - * or read_status_mask. 
491 - */ 492 - if (uart_handle_break(&up->port)) 493 - goto ignore_char; 494 - } else if (*status & UART_LSR_PE) 495 - up->port.icount.parity++; 496 - else if (*status & UART_LSR_FE) 497 - up->port.icount.frame++; 498 - if (*status & UART_LSR_OE) 499 - up->port.icount.overrun++; 500 - 501 - /* Mask off conditions which should be ignored. */ 502 - *status &= up->port.read_status_mask; 503 - 504 - #ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE 505 - if (up->port.cons && 506 - up->port.cons->index == up->port.line) { 507 - /* Recover the break flag from console xmit */ 508 - *status |= up->lsr_break_flag; 509 - up->lsr_break_flag = 0; 510 - } 511 - #endif 512 - if (*status & UART_LSR_BI) { 513 - flag = TTY_BREAK; 514 - } else if (*status & UART_LSR_PE) 515 - flag = TTY_PARITY; 516 - else if (*status & UART_LSR_FE) 517 - flag = TTY_FRAME; 518 - } 519 - 520 - if (uart_handle_sysrq_char(&up->port, ch)) 521 - goto ignore_char; 522 - 523 - uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag); 524 - ignore_char: 525 - *status = serial_in(up, UART_LSR); 526 - } while ((*status & UART_LSR_DR) && max_count--); 527 - 528 - spin_unlock_irqrestore(&up->port.lock, *flags); 529 - tty_flip_buffer_push(&up->port.state->port); 530 - spin_lock_irqsave(&up->port.lock, *flags); 531 - } 532 - 533 - static void transmit_chars(struct uart_hsu_port *up) 534 - { 535 - struct circ_buf *xmit = &up->port.state->xmit; 536 - int count; 537 - 538 - if (up->port.x_char) { 539 - serial_out(up, UART_TX, up->port.x_char); 540 - up->port.icount.tx++; 541 - up->port.x_char = 0; 542 - return; 543 - } 544 - if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { 545 - serial_hsu_stop_tx(&up->port); 546 - return; 547 - } 548 - 549 - /* The IRQ is for TX FIFO half-empty */ 550 - count = up->port.fifosize / 2; 551 - 552 - do { 553 - serial_out(up, UART_TX, xmit->buf[xmit->tail]); 554 - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 555 - 556 - up->port.icount.tx++; 557 - if (uart_circ_empty(xmit)) 558 
- break; 559 - } while (--count > 0); 560 - 561 - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 562 - uart_write_wakeup(&up->port); 563 - 564 - if (uart_circ_empty(xmit)) 565 - serial_hsu_stop_tx(&up->port); 566 - } 567 - 568 - static inline void check_modem_status(struct uart_hsu_port *up) 569 - { 570 - int status; 571 - 572 - status = serial_in(up, UART_MSR); 573 - 574 - if ((status & UART_MSR_ANY_DELTA) == 0) 575 - return; 576 - 577 - if (status & UART_MSR_TERI) 578 - up->port.icount.rng++; 579 - if (status & UART_MSR_DDSR) 580 - up->port.icount.dsr++; 581 - /* We may only get DDCD when HW init and reset */ 582 - if (status & UART_MSR_DDCD) 583 - uart_handle_dcd_change(&up->port, status & UART_MSR_DCD); 584 - /* Will start/stop_tx accordingly */ 585 - if (status & UART_MSR_DCTS) 586 - uart_handle_cts_change(&up->port, status & UART_MSR_CTS); 587 - 588 - wake_up_interruptible(&up->port.state->port.delta_msr_wait); 589 - } 590 - 591 - /* 592 - * This handles the interrupt from one port. 
593 - */ 594 - static irqreturn_t port_irq(int irq, void *dev_id) 595 - { 596 - struct uart_hsu_port *up = dev_id; 597 - unsigned int iir, lsr; 598 - unsigned long flags; 599 - 600 - if (unlikely(!up->running)) 601 - return IRQ_NONE; 602 - 603 - spin_lock_irqsave(&up->port.lock, flags); 604 - if (up->use_dma) { 605 - lsr = serial_in(up, UART_LSR); 606 - if (unlikely(lsr & (UART_LSR_BI | UART_LSR_PE | 607 - UART_LSR_FE | UART_LSR_OE))) 608 - dev_warn(up->dev, 609 - "Got lsr irq while using DMA, lsr = 0x%2x\n", 610 - lsr); 611 - check_modem_status(up); 612 - spin_unlock_irqrestore(&up->port.lock, flags); 613 - return IRQ_HANDLED; 614 - } 615 - 616 - iir = serial_in(up, UART_IIR); 617 - if (iir & UART_IIR_NO_INT) { 618 - spin_unlock_irqrestore(&up->port.lock, flags); 619 - return IRQ_NONE; 620 - } 621 - 622 - lsr = serial_in(up, UART_LSR); 623 - if (lsr & UART_LSR_DR) 624 - receive_chars(up, &lsr, &flags); 625 - check_modem_status(up); 626 - 627 - /* lsr will be renewed during the receive_chars */ 628 - if (lsr & UART_LSR_THRE) 629 - transmit_chars(up); 630 - 631 - spin_unlock_irqrestore(&up->port.lock, flags); 632 - return IRQ_HANDLED; 633 - } 634 - 635 - static inline void dma_chan_irq(struct hsu_dma_chan *chan) 636 - { 637 - struct uart_hsu_port *up = chan->uport; 638 - unsigned long flags; 639 - u32 int_sts; 640 - 641 - spin_lock_irqsave(&up->port.lock, flags); 642 - 643 - if (!up->use_dma || !up->running) 644 - goto exit; 645 - 646 - /* 647 - * No matter what situation, need read clear the IRQ status 648 - * There is a bug, see Errata 5, HSD 2900918 649 - */ 650 - int_sts = chan_readl(chan, HSU_CH_SR); 651 - 652 - /* Rx channel */ 653 - if (chan->dirt == DMA_FROM_DEVICE) 654 - hsu_dma_rx(up, int_sts, &flags); 655 - 656 - /* Tx channel */ 657 - if (chan->dirt == DMA_TO_DEVICE) { 658 - chan_writel(chan, HSU_CH_CR, 0x0); 659 - up->dma_tx_on = 0; 660 - hsu_dma_tx(up); 661 - } 662 - 663 - exit: 664 - spin_unlock_irqrestore(&up->port.lock, flags); 665 - return; 666 - } 
667 - 668 - static irqreturn_t dma_irq(int irq, void *dev_id) 669 - { 670 - struct hsu_port *hsu = dev_id; 671 - u32 int_sts, i; 672 - 673 - int_sts = mfd_readl(hsu, HSU_GBL_DMAISR); 674 - 675 - /* Currently we only have 6 channels may be used */ 676 - for (i = 0; i < 6; i++) { 677 - if (int_sts & 0x1) 678 - dma_chan_irq(&hsu->chans[i]); 679 - int_sts >>= 1; 680 - } 681 - 682 - return IRQ_HANDLED; 683 - } 684 - 685 - static unsigned int serial_hsu_tx_empty(struct uart_port *port) 686 - { 687 - struct uart_hsu_port *up = 688 - container_of(port, struct uart_hsu_port, port); 689 - unsigned long flags; 690 - unsigned int ret; 691 - 692 - spin_lock_irqsave(&up->port.lock, flags); 693 - ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0; 694 - spin_unlock_irqrestore(&up->port.lock, flags); 695 - 696 - return ret; 697 - } 698 - 699 - static unsigned int serial_hsu_get_mctrl(struct uart_port *port) 700 - { 701 - struct uart_hsu_port *up = 702 - container_of(port, struct uart_hsu_port, port); 703 - unsigned char status; 704 - unsigned int ret; 705 - 706 - status = serial_in(up, UART_MSR); 707 - 708 - ret = 0; 709 - if (status & UART_MSR_DCD) 710 - ret |= TIOCM_CAR; 711 - if (status & UART_MSR_RI) 712 - ret |= TIOCM_RNG; 713 - if (status & UART_MSR_DSR) 714 - ret |= TIOCM_DSR; 715 - if (status & UART_MSR_CTS) 716 - ret |= TIOCM_CTS; 717 - return ret; 718 - } 719 - 720 - static void serial_hsu_set_mctrl(struct uart_port *port, unsigned int mctrl) 721 - { 722 - struct uart_hsu_port *up = 723 - container_of(port, struct uart_hsu_port, port); 724 - unsigned char mcr = 0; 725 - 726 - if (mctrl & TIOCM_RTS) 727 - mcr |= UART_MCR_RTS; 728 - if (mctrl & TIOCM_DTR) 729 - mcr |= UART_MCR_DTR; 730 - if (mctrl & TIOCM_OUT1) 731 - mcr |= UART_MCR_OUT1; 732 - if (mctrl & TIOCM_OUT2) 733 - mcr |= UART_MCR_OUT2; 734 - if (mctrl & TIOCM_LOOP) 735 - mcr |= UART_MCR_LOOP; 736 - 737 - mcr |= up->mcr; 738 - 739 - serial_out(up, UART_MCR, mcr); 740 - } 741 - 742 - static void 
serial_hsu_break_ctl(struct uart_port *port, int break_state) 743 - { 744 - struct uart_hsu_port *up = 745 - container_of(port, struct uart_hsu_port, port); 746 - unsigned long flags; 747 - 748 - spin_lock_irqsave(&up->port.lock, flags); 749 - if (break_state == -1) 750 - up->lcr |= UART_LCR_SBC; 751 - else 752 - up->lcr &= ~UART_LCR_SBC; 753 - serial_out(up, UART_LCR, up->lcr); 754 - spin_unlock_irqrestore(&up->port.lock, flags); 755 - } 756 - 757 - /* 758 - * What special to do: 759 - * 1. chose the 64B fifo mode 760 - * 2. start dma or pio depends on configuration 761 - * 3. we only allocate dma memory when needed 762 - */ 763 - static int serial_hsu_startup(struct uart_port *port) 764 - { 765 - struct uart_hsu_port *up = 766 - container_of(port, struct uart_hsu_port, port); 767 - unsigned long flags; 768 - 769 - pm_runtime_get_sync(up->dev); 770 - 771 - /* 772 - * Clear the FIFO buffers and disable them. 773 - * (they will be reenabled in set_termios()) 774 - */ 775 - serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); 776 - serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | 777 - UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); 778 - serial_out(up, UART_FCR, 0); 779 - 780 - /* Clear the interrupt registers. */ 781 - (void) serial_in(up, UART_LSR); 782 - (void) serial_in(up, UART_RX); 783 - (void) serial_in(up, UART_IIR); 784 - (void) serial_in(up, UART_MSR); 785 - 786 - /* Now, initialize the UART, default is 8n1 */ 787 - serial_out(up, UART_LCR, UART_LCR_WLEN8); 788 - 789 - spin_lock_irqsave(&up->port.lock, flags); 790 - 791 - up->port.mctrl |= TIOCM_OUT2; 792 - serial_hsu_set_mctrl(&up->port, up->port.mctrl); 793 - 794 - /* 795 - * Finally, enable interrupts. Note: Modem status interrupts 796 - * are set via set_termios(), which will be occurring imminently 797 - * anyway, so we don't enable them here. 
798 - */ 799 - if (!up->use_dma) 800 - up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE; 801 - else 802 - up->ier = 0; 803 - serial_out(up, UART_IER, up->ier); 804 - 805 - spin_unlock_irqrestore(&up->port.lock, flags); 806 - 807 - /* DMA init */ 808 - if (up->use_dma) { 809 - struct hsu_dma_buffer *dbuf; 810 - struct circ_buf *xmit = &port->state->xmit; 811 - 812 - up->dma_tx_on = 0; 813 - 814 - /* First allocate the RX buffer */ 815 - dbuf = &up->rxbuf; 816 - dbuf->buf = kzalloc(HSU_DMA_BUF_SIZE, GFP_KERNEL); 817 - if (!dbuf->buf) { 818 - up->use_dma = 0; 819 - goto exit; 820 - } 821 - dbuf->dma_addr = dma_map_single(port->dev, 822 - dbuf->buf, 823 - HSU_DMA_BUF_SIZE, 824 - DMA_FROM_DEVICE); 825 - dbuf->dma_size = HSU_DMA_BUF_SIZE; 826 - 827 - /* Start the RX channel right now */ 828 - hsu_dma_start_rx_chan(up->rxc, dbuf); 829 - 830 - /* Next init the TX DMA */ 831 - dbuf = &up->txbuf; 832 - dbuf->buf = xmit->buf; 833 - dbuf->dma_addr = dma_map_single(port->dev, 834 - dbuf->buf, 835 - UART_XMIT_SIZE, 836 - DMA_TO_DEVICE); 837 - dbuf->dma_size = UART_XMIT_SIZE; 838 - 839 - /* This should not be changed all around */ 840 - chan_writel(up->txc, HSU_CH_BSR, 32); 841 - chan_writel(up->txc, HSU_CH_MOTSR, 4); 842 - dbuf->ofs = 0; 843 - } 844 - 845 - exit: 846 - /* And clear the interrupt registers again for luck. 
*/ 847 - (void) serial_in(up, UART_LSR); 848 - (void) serial_in(up, UART_RX); 849 - (void) serial_in(up, UART_IIR); 850 - (void) serial_in(up, UART_MSR); 851 - 852 - up->running = 1; 853 - return 0; 854 - } 855 - 856 - static void serial_hsu_shutdown(struct uart_port *port) 857 - { 858 - struct uart_hsu_port *up = 859 - container_of(port, struct uart_hsu_port, port); 860 - unsigned long flags; 861 - 862 - /* Disable interrupts from this port */ 863 - up->ier = 0; 864 - serial_out(up, UART_IER, 0); 865 - up->running = 0; 866 - 867 - spin_lock_irqsave(&up->port.lock, flags); 868 - up->port.mctrl &= ~TIOCM_OUT2; 869 - serial_hsu_set_mctrl(&up->port, up->port.mctrl); 870 - spin_unlock_irqrestore(&up->port.lock, flags); 871 - 872 - /* Disable break condition and FIFOs */ 873 - serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC); 874 - serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | 875 - UART_FCR_CLEAR_RCVR | 876 - UART_FCR_CLEAR_XMIT); 877 - serial_out(up, UART_FCR, 0); 878 - 879 - pm_runtime_put(up->dev); 880 - } 881 - 882 - static void 883 - serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios, 884 - struct ktermios *old) 885 - { 886 - struct uart_hsu_port *up = 887 - container_of(port, struct uart_hsu_port, port); 888 - unsigned char cval, fcr = 0; 889 - unsigned long flags; 890 - unsigned int baud, quot; 891 - u32 ps, mul; 892 - 893 - switch (termios->c_cflag & CSIZE) { 894 - case CS5: 895 - cval = UART_LCR_WLEN5; 896 - break; 897 - case CS6: 898 - cval = UART_LCR_WLEN6; 899 - break; 900 - case CS7: 901 - cval = UART_LCR_WLEN7; 902 - break; 903 - default: 904 - case CS8: 905 - cval = UART_LCR_WLEN8; 906 - break; 907 - } 908 - 909 - /* CMSPAR isn't supported by this driver */ 910 - termios->c_cflag &= ~CMSPAR; 911 - 912 - if (termios->c_cflag & CSTOPB) 913 - cval |= UART_LCR_STOP; 914 - if (termios->c_cflag & PARENB) 915 - cval |= UART_LCR_PARITY; 916 - if (!(termios->c_cflag & PARODD)) 917 - cval |= UART_LCR_EPAR; 918 - 919 - /* 920 
- * The base clk is 50Mhz, and the baud rate come from: 921 - * baud = 50M * MUL / (DIV * PS * DLAB) 922 - * 923 - * For those basic low baud rate we can get the direct 924 - * scalar from 2746800, like 115200 = 2746800/24. For those 925 - * higher baud rate, we handle them case by case, mainly by 926 - * adjusting the MUL/PS registers, and DIV register is kept 927 - * as default value 0x3d09 to make things simple 928 - */ 929 - baud = uart_get_baud_rate(port, termios, old, 0, 4000000); 930 - 931 - quot = 1; 932 - ps = 0x10; 933 - mul = 0x3600; 934 - switch (baud) { 935 - case 3500000: 936 - mul = 0x3345; 937 - ps = 0xC; 938 - break; 939 - case 1843200: 940 - mul = 0x2400; 941 - break; 942 - case 3000000: 943 - case 2500000: 944 - case 2000000: 945 - case 1500000: 946 - case 1000000: 947 - case 500000: 948 - /* mul/ps/quot = 0x9C4/0x10/0x1 will make a 500000 bps */ 949 - mul = baud / 500000 * 0x9C4; 950 - break; 951 - default: 952 - /* Use uart_get_divisor to get quot for other baud rates */ 953 - quot = 0; 954 - } 955 - 956 - if (!quot) 957 - quot = uart_get_divisor(port, baud); 958 - 959 - if ((up->port.uartclk / quot) < (2400 * 16)) 960 - fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B; 961 - else if ((up->port.uartclk / quot) < (230400 * 16)) 962 - fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_16B; 963 - else 964 - fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_32B; 965 - 966 - fcr |= UART_FCR_HSU_64B_FIFO; 967 - 968 - /* 969 - * Ok, we're now changing the port state. Do it with 970 - * interrupts disabled. 
971 - */ 972 - spin_lock_irqsave(&up->port.lock, flags); 973 - 974 - /* Update the per-port timeout */ 975 - uart_update_timeout(port, termios->c_cflag, baud); 976 - 977 - up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; 978 - if (termios->c_iflag & INPCK) 979 - up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; 980 - if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) 981 - up->port.read_status_mask |= UART_LSR_BI; 982 - 983 - /* Characters to ignore */ 984 - up->port.ignore_status_mask = 0; 985 - if (termios->c_iflag & IGNPAR) 986 - up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; 987 - if (termios->c_iflag & IGNBRK) { 988 - up->port.ignore_status_mask |= UART_LSR_BI; 989 - /* 990 - * If we're ignoring parity and break indicators, 991 - * ignore overruns too (for real raw support). 992 - */ 993 - if (termios->c_iflag & IGNPAR) 994 - up->port.ignore_status_mask |= UART_LSR_OE; 995 - } 996 - 997 - /* Ignore all characters if CREAD is not set */ 998 - if ((termios->c_cflag & CREAD) == 0) 999 - up->port.ignore_status_mask |= UART_LSR_DR; 1000 - 1001 - /* 1002 - * CTS flow control flag and modem status interrupts, disable 1003 - * MSI by default 1004 - */ 1005 - up->ier &= ~UART_IER_MSI; 1006 - if (UART_ENABLE_MS(&up->port, termios->c_cflag)) 1007 - up->ier |= UART_IER_MSI; 1008 - 1009 - serial_out(up, UART_IER, up->ier); 1010 - 1011 - if (termios->c_cflag & CRTSCTS) 1012 - up->mcr |= UART_MCR_AFE | UART_MCR_RTS; 1013 - else 1014 - up->mcr &= ~UART_MCR_AFE; 1015 - 1016 - serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */ 1017 - serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */ 1018 - serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */ 1019 - serial_out(up, UART_LCR, cval); /* reset DLAB */ 1020 - serial_out(up, UART_MUL, mul); /* set MUL */ 1021 - serial_out(up, UART_PS, ps); /* set PS */ 1022 - up->lcr = cval; /* Save LCR */ 1023 - serial_hsu_set_mctrl(&up->port, up->port.mctrl); 1024 - serial_out(up, 
UART_FCR, fcr); 1025 - spin_unlock_irqrestore(&up->port.lock, flags); 1026 - } 1027 - 1028 - static void 1029 - serial_hsu_pm(struct uart_port *port, unsigned int state, 1030 - unsigned int oldstate) 1031 - { 1032 - } 1033 - 1034 - static void serial_hsu_release_port(struct uart_port *port) 1035 - { 1036 - } 1037 - 1038 - static int serial_hsu_request_port(struct uart_port *port) 1039 - { 1040 - return 0; 1041 - } 1042 - 1043 - static void serial_hsu_config_port(struct uart_port *port, int flags) 1044 - { 1045 - struct uart_hsu_port *up = 1046 - container_of(port, struct uart_hsu_port, port); 1047 - up->port.type = PORT_MFD; 1048 - } 1049 - 1050 - static int 1051 - serial_hsu_verify_port(struct uart_port *port, struct serial_struct *ser) 1052 - { 1053 - /* We don't want the core code to modify any port params */ 1054 - return -EINVAL; 1055 - } 1056 - 1057 - static const char * 1058 - serial_hsu_type(struct uart_port *port) 1059 - { 1060 - struct uart_hsu_port *up = 1061 - container_of(port, struct uart_hsu_port, port); 1062 - return up->name; 1063 - } 1064 - 1065 - /* Mainly for uart console use */ 1066 - static struct uart_hsu_port *serial_hsu_ports[3]; 1067 - static struct uart_driver serial_hsu_reg; 1068 - 1069 - #ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE 1070 - 1071 - #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) 1072 - 1073 - /* Wait for transmitter & holding register to empty */ 1074 - static inline void wait_for_xmitr(struct uart_hsu_port *up) 1075 - { 1076 - unsigned int status, tmout = 1000; 1077 - 1078 - /* Wait up to 1ms for the character to be sent. 
*/ 1079 - do { 1080 - status = serial_in(up, UART_LSR); 1081 - 1082 - if (status & UART_LSR_BI) 1083 - up->lsr_break_flag = UART_LSR_BI; 1084 - 1085 - if (--tmout == 0) 1086 - break; 1087 - udelay(1); 1088 - } while (!(status & BOTH_EMPTY)); 1089 - 1090 - /* Wait up to 1s for flow control if necessary */ 1091 - if (up->port.flags & UPF_CONS_FLOW) { 1092 - tmout = 1000000; 1093 - while (--tmout && 1094 - ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0)) 1095 - udelay(1); 1096 - } 1097 - } 1098 - 1099 - static void serial_hsu_console_putchar(struct uart_port *port, int ch) 1100 - { 1101 - struct uart_hsu_port *up = 1102 - container_of(port, struct uart_hsu_port, port); 1103 - 1104 - wait_for_xmitr(up); 1105 - serial_out(up, UART_TX, ch); 1106 - } 1107 - 1108 - /* 1109 - * Print a string to the serial port trying not to disturb 1110 - * any possible real use of the port... 1111 - * 1112 - * The console_lock must be held when we get here. 1113 - */ 1114 - static void 1115 - serial_hsu_console_write(struct console *co, const char *s, unsigned int count) 1116 - { 1117 - struct uart_hsu_port *up = serial_hsu_ports[co->index]; 1118 - unsigned long flags; 1119 - unsigned int ier; 1120 - int locked = 1; 1121 - 1122 - touch_nmi_watchdog(); 1123 - 1124 - local_irq_save(flags); 1125 - if (up->port.sysrq) 1126 - locked = 0; 1127 - else if (oops_in_progress) { 1128 - locked = spin_trylock(&up->port.lock); 1129 - } else 1130 - spin_lock(&up->port.lock); 1131 - 1132 - /* First save the IER then disable the interrupts */ 1133 - ier = serial_in(up, UART_IER); 1134 - serial_out(up, UART_IER, 0); 1135 - 1136 - uart_console_write(&up->port, s, count, serial_hsu_console_putchar); 1137 - 1138 - /* 1139 - * Finally, wait for transmitter to become empty 1140 - * and restore the IER 1141 - */ 1142 - wait_for_xmitr(up); 1143 - serial_out(up, UART_IER, ier); 1144 - 1145 - if (locked) 1146 - spin_unlock(&up->port.lock); 1147 - local_irq_restore(flags); 1148 - } 1149 - 1150 - static struct 
console serial_hsu_console; 1151 - 1152 - static int __init 1153 - serial_hsu_console_setup(struct console *co, char *options) 1154 - { 1155 - struct uart_hsu_port *up; 1156 - int baud = 115200; 1157 - int bits = 8; 1158 - int parity = 'n'; 1159 - int flow = 'n'; 1160 - 1161 - if (co->index == -1 || co->index >= serial_hsu_reg.nr) 1162 - co->index = 0; 1163 - up = serial_hsu_ports[co->index]; 1164 - if (!up) 1165 - return -ENODEV; 1166 - 1167 - if (options) 1168 - uart_parse_options(options, &baud, &parity, &bits, &flow); 1169 - 1170 - return uart_set_options(&up->port, co, baud, parity, bits, flow); 1171 - } 1172 - 1173 - static struct console serial_hsu_console = { 1174 - .name = "ttyMFD", 1175 - .write = serial_hsu_console_write, 1176 - .device = uart_console_device, 1177 - .setup = serial_hsu_console_setup, 1178 - .flags = CON_PRINTBUFFER, 1179 - .index = -1, 1180 - .data = &serial_hsu_reg, 1181 - }; 1182 - 1183 - #define SERIAL_HSU_CONSOLE (&serial_hsu_console) 1184 - #else 1185 - #define SERIAL_HSU_CONSOLE NULL 1186 - #endif 1187 - 1188 - static struct uart_ops serial_hsu_pops = { 1189 - .tx_empty = serial_hsu_tx_empty, 1190 - .set_mctrl = serial_hsu_set_mctrl, 1191 - .get_mctrl = serial_hsu_get_mctrl, 1192 - .stop_tx = serial_hsu_stop_tx, 1193 - .start_tx = serial_hsu_start_tx, 1194 - .stop_rx = serial_hsu_stop_rx, 1195 - .enable_ms = serial_hsu_enable_ms, 1196 - .break_ctl = serial_hsu_break_ctl, 1197 - .startup = serial_hsu_startup, 1198 - .shutdown = serial_hsu_shutdown, 1199 - .set_termios = serial_hsu_set_termios, 1200 - .pm = serial_hsu_pm, 1201 - .type = serial_hsu_type, 1202 - .release_port = serial_hsu_release_port, 1203 - .request_port = serial_hsu_request_port, 1204 - .config_port = serial_hsu_config_port, 1205 - .verify_port = serial_hsu_verify_port, 1206 - }; 1207 - 1208 - static struct uart_driver serial_hsu_reg = { 1209 - .owner = THIS_MODULE, 1210 - .driver_name = "MFD serial", 1211 - .dev_name = "ttyMFD", 1212 - .major = TTY_MAJOR, 1213 - 
.minor = 128, 1214 - .nr = 3, 1215 - .cons = SERIAL_HSU_CONSOLE, 1216 - }; 1217 - 1218 - #ifdef CONFIG_PM 1219 - static int serial_hsu_suspend(struct pci_dev *pdev, pm_message_t state) 1220 - { 1221 - void *priv = pci_get_drvdata(pdev); 1222 - struct uart_hsu_port *up; 1223 - 1224 - /* Make sure this is not the internal dma controller */ 1225 - if (priv && (pdev->device != 0x081E)) { 1226 - up = priv; 1227 - uart_suspend_port(&serial_hsu_reg, &up->port); 1228 - } 1229 - 1230 - pci_save_state(pdev); 1231 - pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1232 - return 0; 1233 - } 1234 - 1235 - static int serial_hsu_resume(struct pci_dev *pdev) 1236 - { 1237 - void *priv = pci_get_drvdata(pdev); 1238 - struct uart_hsu_port *up; 1239 - int ret; 1240 - 1241 - pci_set_power_state(pdev, PCI_D0); 1242 - pci_restore_state(pdev); 1243 - 1244 - ret = pci_enable_device(pdev); 1245 - if (ret) 1246 - dev_warn(&pdev->dev, 1247 - "HSU: can't re-enable device, try to continue\n"); 1248 - 1249 - if (priv && (pdev->device != 0x081E)) { 1250 - up = priv; 1251 - uart_resume_port(&serial_hsu_reg, &up->port); 1252 - } 1253 - return 0; 1254 - } 1255 - 1256 - static int serial_hsu_runtime_idle(struct device *dev) 1257 - { 1258 - pm_schedule_suspend(dev, 500); 1259 - return -EBUSY; 1260 - } 1261 - 1262 - static int serial_hsu_runtime_suspend(struct device *dev) 1263 - { 1264 - return 0; 1265 - } 1266 - 1267 - static int serial_hsu_runtime_resume(struct device *dev) 1268 - { 1269 - return 0; 1270 - } 1271 - #else 1272 - #define serial_hsu_suspend NULL 1273 - #define serial_hsu_resume NULL 1274 - #define serial_hsu_runtime_idle NULL 1275 - #define serial_hsu_runtime_suspend NULL 1276 - #define serial_hsu_runtime_resume NULL 1277 - #endif 1278 - 1279 - static const struct dev_pm_ops serial_hsu_pm_ops = { 1280 - .runtime_suspend = serial_hsu_runtime_suspend, 1281 - .runtime_resume = serial_hsu_runtime_resume, 1282 - .runtime_idle = serial_hsu_runtime_idle, 1283 - }; 1284 - 1285 - /* 
temp global pointer before we settle down on using one or four PCI dev */ 1286 - static struct hsu_port *phsu; 1287 - 1288 - static int serial_hsu_probe(struct pci_dev *pdev, 1289 - const struct pci_device_id *ent) 1290 - { 1291 - struct uart_hsu_port *uport; 1292 - int index, ret; 1293 - 1294 - printk(KERN_INFO "HSU: found PCI Serial controller(ID: %04x:%04x)\n", 1295 - pdev->vendor, pdev->device); 1296 - 1297 - switch (pdev->device) { 1298 - case 0x081B: 1299 - index = 0; 1300 - break; 1301 - case 0x081C: 1302 - index = 1; 1303 - break; 1304 - case 0x081D: 1305 - index = 2; 1306 - break; 1307 - case 0x081E: 1308 - /* internal DMA controller */ 1309 - index = 3; 1310 - break; 1311 - default: 1312 - dev_err(&pdev->dev, "HSU: out of index!"); 1313 - return -ENODEV; 1314 - } 1315 - 1316 - ret = pci_enable_device(pdev); 1317 - if (ret) 1318 - return ret; 1319 - 1320 - if (index == 3) { 1321 - /* DMA controller */ 1322 - ret = request_irq(pdev->irq, dma_irq, 0, "hsu_dma", phsu); 1323 - if (ret) { 1324 - dev_err(&pdev->dev, "can not get IRQ\n"); 1325 - goto err_disable; 1326 - } 1327 - pci_set_drvdata(pdev, phsu); 1328 - } else { 1329 - /* UART port 0~2 */ 1330 - uport = &phsu->port[index]; 1331 - uport->port.irq = pdev->irq; 1332 - uport->port.dev = &pdev->dev; 1333 - uport->dev = &pdev->dev; 1334 - 1335 - ret = request_irq(pdev->irq, port_irq, 0, uport->name, uport); 1336 - if (ret) { 1337 - dev_err(&pdev->dev, "can not get IRQ\n"); 1338 - goto err_disable; 1339 - } 1340 - uart_add_one_port(&serial_hsu_reg, &uport->port); 1341 - 1342 - pci_set_drvdata(pdev, uport); 1343 - } 1344 - 1345 - pm_runtime_put_noidle(&pdev->dev); 1346 - pm_runtime_allow(&pdev->dev); 1347 - 1348 - return 0; 1349 - 1350 - err_disable: 1351 - pci_disable_device(pdev); 1352 - return ret; 1353 - } 1354 - 1355 - static void hsu_global_init(void) 1356 - { 1357 - struct hsu_port *hsu; 1358 - struct uart_hsu_port *uport; 1359 - struct hsu_dma_chan *dchan; 1360 - int i, ret; 1361 - 1362 - hsu = 
kzalloc(sizeof(struct hsu_port), GFP_KERNEL); 1363 - if (!hsu) 1364 - return; 1365 - 1366 - /* Get basic io resource and map it */ 1367 - hsu->paddr = 0xffa28000; 1368 - hsu->iolen = 0x1000; 1369 - 1370 - if (!(request_mem_region(hsu->paddr, hsu->iolen, "HSU global"))) 1371 - pr_warn("HSU: error in request mem region\n"); 1372 - 1373 - hsu->reg = ioremap_nocache((unsigned long)hsu->paddr, hsu->iolen); 1374 - if (!hsu->reg) { 1375 - pr_err("HSU: error in ioremap\n"); 1376 - ret = -ENOMEM; 1377 - goto err_free_region; 1378 - } 1379 - 1380 - /* Initialise the 3 UART ports */ 1381 - uport = hsu->port; 1382 - for (i = 0; i < 3; i++) { 1383 - uport->port.type = PORT_MFD; 1384 - uport->port.iotype = UPIO_MEM; 1385 - uport->port.mapbase = (resource_size_t)hsu->paddr 1386 - + HSU_PORT_REG_OFFSET 1387 - + i * HSU_PORT_REG_LENGTH; 1388 - uport->port.membase = hsu->reg + HSU_PORT_REG_OFFSET 1389 - + i * HSU_PORT_REG_LENGTH; 1390 - 1391 - sprintf(uport->name, "hsu_port%d", i); 1392 - uport->port.fifosize = 64; 1393 - uport->port.ops = &serial_hsu_pops; 1394 - uport->port.line = i; 1395 - uport->port.flags = UPF_IOREMAP; 1396 - /* set the scalable maxim support rate to 2746800 bps */ 1397 - uport->port.uartclk = 115200 * 24 * 16; 1398 - 1399 - uport->running = 0; 1400 - uport->txc = &hsu->chans[i * 2]; 1401 - uport->rxc = &hsu->chans[i * 2 + 1]; 1402 - 1403 - serial_hsu_ports[i] = uport; 1404 - uport->index = i; 1405 - 1406 - if (hsu_dma_enable & (1<<i)) 1407 - uport->use_dma = 1; 1408 - else 1409 - uport->use_dma = 0; 1410 - 1411 - uport++; 1412 - } 1413 - 1414 - /* Initialise 6 dma channels */ 1415 - dchan = hsu->chans; 1416 - for (i = 0; i < 6; i++) { 1417 - dchan->id = i; 1418 - dchan->dirt = (i & 0x1) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; 1419 - dchan->uport = &hsu->port[i/2]; 1420 - dchan->reg = hsu->reg + HSU_DMA_CHANS_REG_OFFSET + 1421 - i * HSU_DMA_CHANS_REG_LENGTH; 1422 - 1423 - dchan++; 1424 - } 1425 - 1426 - phsu = hsu; 1427 - hsu_debugfs_init(hsu); 1428 - return; 1429 - 1430 - err_free_region: 1431 - release_mem_region(hsu->paddr, hsu->iolen); 1432 - kfree(hsu); 1433 - return; 1434 - } 1435 - 1436 - static void serial_hsu_remove(struct pci_dev *pdev) 1437 - { 1438 - void *priv = pci_get_drvdata(pdev); 1439 - struct uart_hsu_port *up; 1440 - 1441 - if (!priv) 1442 - return; 1443 - 1444 - pm_runtime_forbid(&pdev->dev); 1445 - pm_runtime_get_noresume(&pdev->dev); 1446 - 1447 - /* For port 0/1/2, priv is the address of uart_hsu_port */ 1448 - if (pdev->device != 0x081E) { 1449 - up = priv; 1450 - uart_remove_one_port(&serial_hsu_reg, &up->port); 1451 - } 1452 - 1453 - free_irq(pdev->irq, priv); 1454 - pci_disable_device(pdev); 1455 - } 1456 - 1457 - /* First 3 are UART ports, and the 4th is the DMA */ 1458 - static const struct pci_device_id pci_ids[] = { 1459 - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) }, 1460 - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) }, 1461 - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) }, 1462 - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081E) }, 1463 - {}, 1464 - }; 1465 - 1466 - static struct pci_driver hsu_pci_driver = { 1467 - .name = "HSU serial", 1468 - .id_table = pci_ids, 1469 - .probe = serial_hsu_probe, 1470 - .remove = serial_hsu_remove, 1471 - .suspend = serial_hsu_suspend, 1472 - .resume = serial_hsu_resume, 1473 - .driver = { 1474 - .pm = &serial_hsu_pm_ops, 1475 - }, 1476 - }; 1477 - 1478 - static int __init hsu_pci_init(void) 1479 - { 1480 - int ret; 1481 - 1482 - hsu_global_init(); 1483 - 1484 - ret = uart_register_driver(&serial_hsu_reg); 1485 - if (ret) 1486 - return ret; 1487 - 1488 - return pci_register_driver(&hsu_pci_driver); 1489 - } 1490 - 1491 - static void __exit hsu_pci_exit(void) 1492 - { 1493 - 
pci_unregister_driver(&hsu_pci_driver); 1494 - uart_unregister_driver(&serial_hsu_reg); 1495 - 1496 - hsu_debugfs_remove(phsu); 1497 - 1498 - kfree(phsu); 1499 - } 1500 - 1501 - module_init(hsu_pci_init); 1502 - module_exit(hsu_pci_exit); 1503 - 1504 - MODULE_LICENSE("GPL v2"); 1505 - MODULE_DEVICE_TABLE(pci, pci_ids);
+9 -7
drivers/tty/serial/mxs-auart.c
··· 1155 1155 return 0; 1156 1156 } 1157 1157 1158 - static bool mxs_auart_init_gpios(struct mxs_auart_port *s, struct device *dev) 1158 + static int mxs_auart_init_gpios(struct mxs_auart_port *s, struct device *dev) 1159 1159 { 1160 1160 enum mctrl_gpio_idx i; 1161 1161 struct gpio_desc *gpiod; 1162 1162 1163 1163 s->gpios = mctrl_gpio_init(dev, 0); 1164 - if (IS_ERR_OR_NULL(s->gpios)) 1165 - return false; 1164 + if (IS_ERR(s->gpios)) 1165 + return PTR_ERR(s->gpios); 1166 1166 1167 1167 /* Block (enabled before) DMA option if RTS or CTS is GPIO line */ 1168 1168 if (!RTS_AT_AUART() || !CTS_AT_AUART()) { ··· 1180 1180 s->gpio_irq[i] = -EINVAL; 1181 1181 } 1182 1182 1183 - return true; 1183 + return 0; 1184 1184 } 1185 1185 1186 1186 static void mxs_auart_free_gpio_irq(struct mxs_auart_port *s) ··· 1276 1276 1277 1277 platform_set_drvdata(pdev, s); 1278 1278 1279 - if (!mxs_auart_init_gpios(s, &pdev->dev)) 1280 - dev_err(&pdev->dev, 1281 - "Failed to initialize GPIOs. The serial port may not work as expected\n"); 1279 + ret = mxs_auart_init_gpios(s, &pdev->dev); 1280 + if (ret) { 1281 + dev_err(&pdev->dev, "Failed to initialize GPIOs.\n"); 1282 + got out_free_irq; 1283 + } 1282 1284 1283 1285 /* 1284 1286 * Get the GPIO lines IRQ
+47 -2
drivers/tty/serial/serial_core.c
··· 1118 1118 1119 1119 cprev = cnow; 1120 1120 } 1121 - 1122 - current->state = TASK_RUNNING; 1121 + __set_current_state(TASK_RUNNING); 1123 1122 remove_wait_queue(&port->delta_msr_wait, &wait); 1124 1123 1125 1124 return ret; ··· 1807 1808 1808 1809 return ports + idx; 1809 1810 } 1811 + 1812 + /** 1813 + * uart_parse_earlycon - Parse earlycon options 1814 + * @p: ptr to 2nd field (ie., just beyond '<name>,') 1815 + * @iotype: ptr for decoded iotype (out) 1816 + * @addr: ptr for decoded mapbase/iobase (out) 1817 + * @options: ptr for <options> field; NULL if not present (out) 1818 + * 1819 + * Decodes earlycon kernel command line parameters of the form 1820 + * earlycon=<name>,io|mmio|mmio32,<addr>,<options> 1821 + * console=<name>,io|mmio|mmio32,<addr>,<options> 1822 + * 1823 + * The optional form 1824 + * earlycon=<name>,0x<addr>,<options> 1825 + * console=<name>,0x<addr>,<options> 1826 + * is also accepted; the returned @iotype will be UPIO_MEM. 1827 + * 1828 + * Returns 0 on success or -EINVAL on failure 1829 + */ 1830 + int uart_parse_earlycon(char *p, unsigned char *iotype, unsigned long *addr, 1831 + char **options) 1832 + { 1833 + if (strncmp(p, "mmio,", 5) == 0) { 1834 + *iotype = UPIO_MEM; 1835 + p += 5; 1836 + } else if (strncmp(p, "mmio32,", 7) == 0) { 1837 + *iotype = UPIO_MEM32; 1838 + p += 7; 1839 + } else if (strncmp(p, "io,", 3) == 0) { 1840 + *iotype = UPIO_PORT; 1841 + p += 3; 1842 + } else if (strncmp(p, "0x", 2) == 0) { 1843 + *iotype = UPIO_MEM; 1844 + } else { 1845 + return -EINVAL; 1846 + } 1847 + 1848 + *addr = simple_strtoul(p, NULL, 0); 1849 + p = strchr(p, ','); 1850 + if (p) 1851 + p++; 1852 + 1853 + *options = p; 1854 + return 0; 1855 + } 1856 + EXPORT_SYMBOL_GPL(uart_parse_earlycon); 1810 1857 1811 1858 /** 1812 1859 * uart_parse_options - Parse serial port baud/parity/bits/flow control.
+13 -36
drivers/tty/serial/serial_mctrl_gpio.c
··· 48 48 int value_array[UART_GPIO_MAX]; 49 49 unsigned int count = 0; 50 50 51 - if (IS_ERR_OR_NULL(gpios)) 52 - return; 53 - 54 51 for (i = 0; i < UART_GPIO_MAX; i++) 55 52 if (!IS_ERR_OR_NULL(gpios->gpio[i]) && 56 53 mctrl_gpios_desc[i].dir_out) { ··· 62 65 struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios, 63 66 enum mctrl_gpio_idx gidx) 64 67 { 65 - if (!IS_ERR_OR_NULL(gpios) && !IS_ERR_OR_NULL(gpios->gpio[gidx])) 66 - return gpios->gpio[gidx]; 67 - else 68 - return NULL; 68 + return gpios->gpio[gidx]; 69 69 } 70 70 EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod); 71 71 ··· 70 76 { 71 77 enum mctrl_gpio_idx i; 72 78 73 - /* 74 - * return it unchanged if the structure is not allocated 75 - */ 76 - if (IS_ERR_OR_NULL(gpios)) 77 - return *mctrl; 78 - 79 79 for (i = 0; i < UART_GPIO_MAX; i++) { 80 - if (!IS_ERR_OR_NULL(gpios->gpio[i]) && 81 - !mctrl_gpios_desc[i].dir_out) { 80 + if (gpios->gpio[i] && !mctrl_gpios_desc[i].dir_out) { 82 81 if (gpiod_get_value(gpios->gpio[i])) 83 82 *mctrl |= mctrl_gpios_desc[i].mctrl; 84 83 else ··· 94 107 return ERR_PTR(-ENOMEM); 95 108 96 109 for (i = 0; i < UART_GPIO_MAX; i++) { 97 - gpios->gpio[i] = devm_gpiod_get_index(dev, 98 - mctrl_gpios_desc[i].name, 99 - idx); 100 - 101 - /* 102 - * The GPIOs are maybe not all filled, 103 - * this is not an error. 
104 - */ 105 - if (IS_ERR_OR_NULL(gpios->gpio[i])) 106 - continue; 110 + enum gpiod_flags flags; 107 111 108 112 if (mctrl_gpios_desc[i].dir_out) 109 - err = gpiod_direction_output(gpios->gpio[i], 0); 113 + flags = GPIOD_OUT_LOW; 110 114 else 111 - err = gpiod_direction_input(gpios->gpio[i]); 112 - if (err) { 113 - dev_dbg(dev, "Unable to set direction for %s GPIO", 114 - mctrl_gpios_desc[i].name); 115 - devm_gpiod_put(dev, gpios->gpio[i]); 116 - gpios->gpio[i] = NULL; 117 - } 115 + flags = GPIOD_IN; 116 + 117 + gpios->gpio[i] = 118 + devm_gpiod_get_index_optional(dev, 119 + mctrl_gpios_desc[i].name, 120 + idx, flags); 121 + 122 + if (IS_ERR(gpios->gpio[i])) 123 + return ERR_CAST(gpios->gpio[i]); 118 124 } 119 125 120 126 return gpios; ··· 117 137 void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios) 118 138 { 119 139 enum mctrl_gpio_idx i; 120 - 121 - if (IS_ERR_OR_NULL(gpios)) 122 - return; 123 140 124 141 for (i = 0; i < UART_GPIO_MAX; i++) 125 142 if (!IS_ERR_OR_NULL(gpios->gpio[i]))
+5 -1
drivers/tty/serial/sprd_serial.c
··· 493 493 return -EINVAL; 494 494 if (port->irq != ser->irq) 495 495 return -EINVAL; 496 + if (port->iotype != ser->io_type) 497 + return -EINVAL; 496 498 return 0; 497 499 } 498 500 ··· 709 707 up->dev = &pdev->dev; 710 708 up->line = index; 711 709 up->type = PORT_SPRD; 712 - up->iotype = SERIAL_IO_PORT; 710 + up->iotype = UPIO_MEM; 713 711 up->uartclk = SPRD_DEF_RATE; 714 712 up->fifosize = SPRD_FIFO_SIZE; 715 713 up->ops = &serial_sprd_ops; ··· 756 754 return ret; 757 755 } 758 756 757 + #ifdef CONFIG_PM_SLEEP 759 758 static int sprd_suspend(struct device *dev) 760 759 { 761 760 struct sprd_uart_port *sup = dev_get_drvdata(dev); ··· 774 771 775 772 return 0; 776 773 } 774 + #endif 777 775 778 776 static SIMPLE_DEV_PM_OPS(sprd_pm_ops, sprd_suspend, sprd_resume); 779 777
+2 -1
drivers/tty/serial/xilinx_uartps.c
··· 1154 1154 return -EINVAL; 1155 1155 1156 1156 if (!port->mapbase) { 1157 - pr_debug("console on ttyPS%i not present\n", co->index); 1157 + pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n", 1158 + co->index); 1158 1159 return -ENODEV; 1159 1160 } 1160 1161
+2 -2
drivers/tty/vt/vt.c
··· 1237 1237 1238 1238 struct rgb { u8 r; u8 g; u8 b; }; 1239 1239 1240 - struct rgb rgb_from_256(int i) 1240 + static struct rgb rgb_from_256(int i) 1241 1241 { 1242 1242 struct rgb c; 1243 1243 if (i < 8) { /* Standard colours. */ ··· 1573 1573 case 11: /* set bell duration in msec */ 1574 1574 if (vc->vc_npar >= 1) 1575 1575 vc->vc_bell_duration = (vc->vc_par[1] < 2000) ? 1576 - vc->vc_par[1] * HZ / 1000 : 0; 1576 + msecs_to_jiffies(vc->vc_par[1]) : 0; 1577 1577 else 1578 1578 vc->vc_bell_duration = DEFAULT_BELL_DURATION; 1579 1579 break;
+1 -1
drivers/tty/vt/vt_ioctl.c
··· 388 388 * Generate the tone for the appropriate number of ticks. 389 389 * If the time is zero, turn off sound ourselves. 390 390 */ 391 - ticks = HZ * ((arg >> 16) & 0xffff) / 1000; 391 + ticks = msecs_to_jiffies((arg >> 16) & 0xffff); 392 392 count = ticks ? (arg & 0xffff) : 0; 393 393 if (count) 394 394 count = PIT_TICK_RATE / count;
+48
include/linux/dma/hsu.h
··· 1 + /* 2 + * Driver for the High Speed UART DMA 3 + * 4 + * Copyright (C) 2015 Intel Corporation 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + 11 + #ifndef _DMA_HSU_H 12 + #define _DMA_HSU_H 13 + 14 + #include <linux/device.h> 15 + #include <linux/interrupt.h> 16 + 17 + #include <linux/platform_data/dma-hsu.h> 18 + 19 + struct hsu_dma; 20 + 21 + /** 22 + * struct hsu_dma_chip - representation of HSU DMA hardware 23 + * @dev: struct device of the DMA controller 24 + * @irq: irq line 25 + * @regs: memory mapped I/O space 26 + * @length: I/O space length 27 + * @offset: offset of the I/O space where registers are located 28 + * @hsu: struct hsu_dma that is filled by ->probe() 29 + * @pdata: platform data for the DMA controller if provided 30 + */ 31 + struct hsu_dma_chip { 32 + struct device *dev; 33 + int irq; 34 + void __iomem *regs; 35 + unsigned int length; 36 + unsigned int offset; 37 + struct hsu_dma *hsu; 38 + struct hsu_dma_platform_data *pdata; 39 + }; 40 + 41 + /* Export to the internal users */ 42 + irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr); 43 + 44 + /* Export to the platform drivers */ 45 + int hsu_dma_probe(struct hsu_dma_chip *chip); 46 + int hsu_dma_remove(struct hsu_dma_chip *chip); 47 + 48 + #endif /* _DMA_HSU_H */
+25
include/linux/platform_data/dma-hsu.h
··· 1 + /* 2 + * Driver for the High Speed UART DMA 3 + * 4 + * Copyright (C) 2015 Intel Corporation 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + 11 + #ifndef _PLATFORM_DATA_DMA_HSU_H 12 + #define _PLATFORM_DATA_DMA_HSU_H 13 + 14 + #include <linux/device.h> 15 + 16 + struct hsu_dma_slave { 17 + struct device *dma_dev; 18 + int chan_id; 19 + }; 20 + 21 + struct hsu_dma_platform_data { 22 + unsigned short nr_channels; 23 + }; 24 + 25 + #endif /* _PLATFORM_DATA_DMA_HSU_H */
-5
include/linux/platform_data/serial-imx.h
··· 20 20 #define ASMARM_ARCH_UART_H 21 21 22 22 #define IMXUART_HAVE_RTSCTS (1<<0) 23 - #define IMXUART_IRDA (1<<1) 24 23 25 24 struct imxuart_platform_data { 26 25 unsigned int flags; 27 - void (*irda_enable)(int enable); 28 - unsigned int irda_inv_rx:1; 29 - unsigned int irda_inv_tx:1; 30 - unsigned short transceiver_delay; 31 26 }; 32 27 33 28 #endif
+2
include/linux/serial_core.h
··· 354 354 355 355 struct uart_port *uart_get_console(struct uart_port *ports, int nr, 356 356 struct console *c); 357 + int uart_parse_earlycon(char *p, unsigned char *iotype, unsigned long *addr, 358 + char **options); 357 359 void uart_parse_options(char *options, int *baud, int *parity, int *bits, 358 360 int *flow); 359 361 int uart_set_options(struct uart_port *port, struct console *co, int baud,
-47
include/linux/serial_mfd.h
··· 1 - #ifndef _SERIAL_MFD_H_ 2 - #define _SERIAL_MFD_H_ 3 - 4 - /* HW register offset definition */ 5 - #define UART_FOR 0x08 6 - #define UART_PS 0x0C 7 - #define UART_MUL 0x0D 8 - #define UART_DIV 0x0E 9 - 10 - #define HSU_GBL_IEN 0x0 11 - #define HSU_GBL_IST 0x4 12 - 13 - #define HSU_GBL_INT_BIT_PORT0 0x0 14 - #define HSU_GBL_INT_BIT_PORT1 0x1 15 - #define HSU_GBL_INT_BIT_PORT2 0x2 16 - #define HSU_GBL_INT_BIT_IRI 0x3 17 - #define HSU_GBL_INT_BIT_HDLC 0x4 18 - #define HSU_GBL_INT_BIT_DMA 0x5 19 - 20 - #define HSU_GBL_ISR 0x8 21 - #define HSU_GBL_DMASR 0x400 22 - #define HSU_GBL_DMAISR 0x404 23 - 24 - #define HSU_PORT_REG_OFFSET 0x80 25 - #define HSU_PORT0_REG_OFFSET 0x80 26 - #define HSU_PORT1_REG_OFFSET 0x100 27 - #define HSU_PORT2_REG_OFFSET 0x180 28 - #define HSU_PORT_REG_LENGTH 0x80 29 - 30 - #define HSU_DMA_CHANS_REG_OFFSET 0x500 31 - #define HSU_DMA_CHANS_REG_LENGTH 0x40 32 - 33 - #define HSU_CH_SR 0x0 /* channel status reg */ 34 - #define HSU_CH_CR 0x4 /* control reg */ 35 - #define HSU_CH_DCR 0x8 /* descriptor control reg */ 36 - #define HSU_CH_BSR 0x10 /* max fifo buffer size reg */ 37 - #define HSU_CH_MOTSR 0x14 /* minimum ocp transfer size */ 38 - #define HSU_CH_D0SAR 0x20 /* desc 0 start addr */ 39 - #define HSU_CH_D0TSR 0x24 /* desc 0 transfer size */ 40 - #define HSU_CH_D1SAR 0x28 41 - #define HSU_CH_D1TSR 0x2C 42 - #define HSU_CH_D2SAR 0x30 43 - #define HSU_CH_D2TSR 0x34 44 - #define HSU_CH_D3SAR 0x38 45 - #define HSU_CH_D3TSR 0x3C 46 - 47 - #endif
-19
include/uapi/linux/serial_reg.h
··· 242 242 #define UART_FCR_PXAR32 0xc0 /* receive FIFO threshold = 32 */ 243 243 244 244 /* 245 - * Intel MID on-chip HSU (High Speed UART) defined bits 246 - */ 247 - #define UART_FCR_HSU_64_1B 0x00 /* receive FIFO treshold = 1 */ 248 - #define UART_FCR_HSU_64_16B 0x40 /* receive FIFO treshold = 16 */ 249 - #define UART_FCR_HSU_64_32B 0x80 /* receive FIFO treshold = 32 */ 250 - #define UART_FCR_HSU_64_56B 0xc0 /* receive FIFO treshold = 56 */ 251 - 252 - #define UART_FCR_HSU_16_1B 0x00 /* receive FIFO treshold = 1 */ 253 - #define UART_FCR_HSU_16_4B 0x40 /* receive FIFO treshold = 4 */ 254 - #define UART_FCR_HSU_16_8B 0x80 /* receive FIFO treshold = 8 */ 255 - #define UART_FCR_HSU_16_14B 0xc0 /* receive FIFO treshold = 14 */ 256 - 257 - #define UART_FCR_HSU_64B_FIFO 0x20 /* chose 64 bytes FIFO */ 258 - #define UART_FCR_HSU_16B_FIFO 0x00 /* chose 16 bytes FIFO */ 259 - 260 - #define UART_FCR_HALF_EMPT_TXI 0x00 /* trigger TX_EMPT IRQ for half empty */ 261 - #define UART_FCR_FULL_EMPT_TXI 0x08 /* trigger TX_EMPT IRQ for full empty */ 262 - 263 - /* 264 245 * These register definitions are for the 16C950 265 246 */ 266 247 #define UART_ASR 0x01 /* Additional Status Register */
-1
kernel/printk/printk.c
··· 2480 2480 newcon->setup(newcon, console_cmdline[i].options) != 0) 2481 2481 break; 2482 2482 newcon->flags |= CON_ENABLED; 2483 - newcon->index = c->index; 2484 2483 if (i == selected_console) { 2485 2484 newcon->flags |= CON_CONSDEV; 2486 2485 preferred_console = selected_console;