
Merge branch 'sh-latest' of git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6

* 'sh-latest' of git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (23 commits)
sh: Ignore R_SH_NONE module relocations.
SH: SE7751: Fix pcibios_map_platform_irq prototype.
sh: remove warning and warning_symbol from struct stacktrace_ops
sh: wire up sys_sendmmsg.
clocksource: sh_tmu: Runtime PM support
clocksource: sh_tmu: __clocksource_updatefreq_hz() update
clocksource: sh_cmt: Runtime PM support
clocksource: sh_cmt: __clocksource_updatefreq_hz() update
dmaengine: shdma: synchronize RCU before freeing, simplify spinlock
dmaengine: shdma: add runtime- and system-level power management
dmaengine: shdma: fix locking
sh: sh-sci: sh7377 and sh73a0 build fixes
sh: cosmetic improvement: use an existing pointer
serial: sh-sci: suspend/resume wakeup support V2
serial: sh-sci: Runtime PM support
sh: select IRQ_FORCED_THREADING.
sh: intc: Set virtual IRQs as nothread.
sh: fixup fpu.o compile order
i2c: add a module alias to the sh-mobile driver
ALSA: add a module alias to the FSI driver
...

Diffstat (total): +235 -157
arch/sh/Kconfig (+1)
···
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_SPARSE_IRQ
+	select IRQ_FORCED_THREADING
 	select RTC_LIB
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_SHOW
arch/sh/boards/mach-ecovec24/setup.c (+9 -9)
···
 	.irq = IRQ0,
 };
 
-#if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE)
+#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
 /* SDHI0 */
 static void sdhi0_set_pwr(struct platform_device *pdev, int state)
 {
···
 	},
 };
 
-#if !defined(CONFIG_MMC_SH_MMCIF)
+#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
 /* SDHI1 */
 static void sdhi1_set_pwr(struct platform_device *pdev, int state)
 {
···
 	},
 };
 
-#if defined(CONFIG_MMC_SH_MMCIF)
+#if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE)
 /* SH_MMCIF */
 static void mmcif_set_pwr(struct platform_device *pdev, int state)
 {
···
 	&ceu0_device,
 	&ceu1_device,
 	&keysc_device,
-#if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE)
+#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
 	&sdhi0_device,
-#if !defined(CONFIG_MMC_SH_MMCIF)
+#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
 	&sdhi1_device,
 #endif
 #else
···
 	&fsi_device,
 	&irda_device,
 	&vou_device,
-#if defined(CONFIG_MMC_SH_MMCIF)
+#if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE)
 	&sh_mmcif_device,
 #endif
 };
···
 	gpio_direction_input(GPIO_PTR5);
 	gpio_direction_input(GPIO_PTR6);
 
-#if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE)
+#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
 	/* enable SDHI0 on CN11 (needs DS2.4 set to ON) */
 	gpio_request(GPIO_FN_SDHI0CD, NULL);
 	gpio_request(GPIO_FN_SDHI0WP, NULL);
···
 	gpio_request(GPIO_PTB6, NULL);
 	gpio_direction_output(GPIO_PTB6, 0);
 
-#if !defined(CONFIG_MMC_SH_MMCIF)
+#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
 	/* enable SDHI1 on CN12 (needs DS2.6,7 set to ON,OFF) */
 	gpio_request(GPIO_FN_SDHI1CD, NULL);
 	gpio_request(GPIO_FN_SDHI1WP, NULL);
···
 	gpio_request(GPIO_PTU5, NULL);
 	gpio_direction_output(GPIO_PTU5, 0);
 
-#if defined(CONFIG_MMC_SH_MMCIF)
+#if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE)
 	/* enable MMCIF (needs DS2.6,7 set to OFF,ON) */
 	gpio_request(GPIO_FN_MMC_D7, NULL);
 	gpio_request(GPIO_FN_MMC_D6, NULL);
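
All nine hunks above make the same kind of change: the `#if defined(CONFIG_FOO)` guards must also test `CONFIG_FOO_MODULE`, because a tristate Kconfig option built as a module (`=m`) defines only the `_MODULE` form of the symbol. A minimal sketch of the idiom:

/* Kconfig symbol idiom behind the hunks above. For a tristate option FOO:
 *   FOO=y  defines CONFIG_FOO
 *   FOO=m  defines CONFIG_FOO_MODULE
 * so "driver present in any form" must test both: */
#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
/* ... register the SDHI platform devices ... */
#endif
/* Later kernels spell this IS_ENABLED(CONFIG_MMC_SDHI). */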
arch/sh/configs/ecovec24_defconfig (+1 -1)
···
 CONFIG_USB_FILE_STORAGE=m
 CONFIG_MMC=y
 CONFIG_MMC_SPI=y
-CONFIG_MMC_TMIO=y
+CONFIG_MMC_SDHI=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_RS5C372=y
 CONFIG_UIO=y
arch/sh/configs/sh7757lcr_defconfig (+1 -1)
···
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_STORAGE=y
 CONFIG_MMC=y
-CONFIG_MMC_TMIO=y
+CONFIG_MMC_SDHI=y
 CONFIG_MMC_SH_MMCIF=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
arch/sh/drivers/pci/fixups-se7751.c (+1 -1)
···
 #include <linux/io.h>
 #include "pci-sh4.h"
 
-int __init pcibios_map_platform_irq(u8 slot, u8 pin)
+int __init pcibios_map_platform_irq(struct pci_dev *, u8 slot, u8 pin)
 {
 	switch (slot) {
 	case 0: return 13;
arch/sh/include/asm/stacktrace.h (-3)
···
 /* Generic stack tracer with callbacks */
 
 struct stacktrace_ops {
-	void (*warning)(void *data, char *msg);
-	/* msg must contain %s for the symbol */
-	void (*warning_symbol)(void *data, char *msg, unsigned long symbol);
 	void (*address)(void *data, unsigned long address, int reliable);
 	/* On negative return stop dumping */
 	int (*stack)(void *data, char *name);
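
With `warning` and `warning_symbol` removed, a `stacktrace_ops` user supplies only the remaining callbacks; the callers cleaned up later in this merge all follow the shape sketched here (names are illustrative):

/* Illustrative stacktrace_ops user after the cleanup; field set taken
 * from the header diff above. */
static int example_stack(void *data, char *name)
{
	return 0;	/* non-negative: keep dumping this stack */
}

static void example_address(void *data, unsigned long addr, int reliable)
{
	printk("%s [<%08lx>]\n", reliable ? "" : "?", addr);
}

static const struct stacktrace_ops example_ops = {
	.stack   = example_stack,
	.address = example_address,
};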
arch/sh/include/asm/unistd_32.h (+2 -1)
···
 #define __NR_open_by_handle_at	360
 #define __NR_clock_adjtime	361
 #define __NR_syncfs		362
+#define __NR_sendmmsg		363
 
-#define NR_syscalls 363
+#define NR_syscalls 364
 
 #ifdef __KERNEL__
arch/sh/include/asm/unistd_64.h (+2 -1)
···
 #define __NR_open_by_handle_at	371
 #define __NR_clock_adjtime	372
 #define __NR_syncfs		373
+#define __NR_sendmmsg		374
 
 #ifdef __KERNEL__
 
-#define NR_syscalls 374
+#define NR_syscalls 375
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
arch/sh/kernel/cpu/Makefile (+1 -3)
···
 
 obj-$(CONFIG_SH_ADC)		+= adc.o
 obj-$(CONFIG_SH_CLK_CPG_LEGACY)	+= clock-cpg.o
-obj-$(CONFIG_SH_FPU)		+= fpu.o
-obj-$(CONFIG_SH_FPU_EMU)	+= fpu.o
 
-obj-y	+= irq/ init.o clock.o hwblk.o proc.o
+obj-y	+= irq/ init.o clock.o fpu.o hwblk.o proc.o
arch/sh/kernel/cpu/shmobile/pm_runtime.c (+3 -3)
···
 	might_sleep();
 
 	/* catch misconfigured drivers not starting with resume */
-	if (test_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags)) {
+	if (test_bit(PDEV_ARCHDATA_FLAG_INIT, &ad->flags)) {
 		ret = -EINVAL;
 		goto out;
 	}
···
 	/* put device on idle list */
 	spin_lock_irqsave(&hwblk_lock, flags);
-	list_add_tail(&pdev->archdata.entry, &hwblk_idle_list);
-	__set_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags);
+	list_add_tail(&ad->entry, &hwblk_idle_list);
+	__set_bit(PDEV_ARCHDATA_FLAG_IDLE, &ad->flags);
 	spin_unlock_irqrestore(&hwblk_lock, flags);
 
 	/* increase idle count */
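
The `ad` pointer is not added by these hunks; per the commit subject ("use an existing pointer") the function already holds an alias to the archdata, presumably of the form:

/* Assumed from the commit title, not visible in the hunk context: */
struct pdev_archdata *ad = &pdev->archdata;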
arch/sh/kernel/dumpstack.c (-15)
···
 	}
 }
 
-static void
-print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
-{
-	printk(data);
-	print_symbol(msg, symbol);
-	printk("\n");
-}
-
-static void print_trace_warning(void *data, char *msg)
-{
-	printk("%s%s\n", (char *)data, msg);
-}
-
 static int print_trace_stack(void *data, char *name)
 {
 	printk("%s <%s> ", (char *)data, name);
···
 }
 
 static const struct stacktrace_ops print_trace_ops = {
-	.warning = print_trace_warning,
-	.warning_symbol = print_trace_warning_symbol,
 	.stack = print_trace_stack,
 	.address = print_trace_address,
 };
arch/sh/kernel/module.c (+2)
···
 #endif
 
 	switch (ELF32_R_TYPE(rel[i].r_info)) {
+	case R_SH_NONE:
+		break;
 	case R_SH_DIR32:
 		value = get_unaligned(location);
 		value += relocation;
arch/sh/kernel/perf_callchain.c (-12)
···
 #include <asm/unwinder.h>
 #include <asm/ptrace.h>
 
-
-static void callchain_warning(void *data, char *msg)
-{
-}
-
-static void
-callchain_warning_symbol(void *data, char *msg, unsigned long symbol)
-{
-}
-
 static int callchain_stack(void *data, char *name)
 {
 	return 0;
···
 }
 
 static const struct stacktrace_ops callchain_ops = {
-	.warning = callchain_warning,
-	.warning_symbol = callchain_warning_symbol,
 	.stack = callchain_stack,
 	.address = callchain_address,
 };
arch/sh/kernel/stacktrace.c (-13)
···
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
-static void save_stack_warning(void *data, char *msg)
-{
-}
-
-static void
-save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
-{
-}
-
 static int save_stack_stack(void *data, char *name)
 {
 	return 0;
···
 }
 
 static const struct stacktrace_ops save_stack_ops = {
-	.warning = save_stack_warning,
-	.warning_symbol = save_stack_warning_symbol,
 	.stack = save_stack_stack,
 	.address = save_stack_address,
 };
···
 }
 
 static const struct stacktrace_ops save_stack_ops_nosched = {
-	.warning = save_stack_warning,
-	.warning_symbol = save_stack_warning_symbol,
 	.stack = save_stack_stack,
 	.address = save_stack_address_nosched,
 };
arch/sh/kernel/syscalls_32.S (+1)
···
 	.long sys_open_by_handle_at	/* 360 */
 	.long sys_clock_adjtime
 	.long sys_syncfs
+	.long sys_sendmmsg
arch/sh/kernel/syscalls_64.S (+1)
···
 	.long sys_open_by_handle_at
 	.long sys_clock_adjtime
 	.long sys_syncfs
+	.long sys_sendmmsg
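
Wiring up a syscall on sh touches three places per ABI: the `__NR_*` number, the `NR_syscalls` count, and the call-table slot above. A hedged userspace probe of the result (illustrative only; 363 is the sh32 number from the header diff, and the kernel returns ENOSYS where the slot is absent):

/* Illustrative sh32 userspace probe: invoke sendmmsg by number with a
 * bad fd. EBADF means the syscall is wired up; ENOSYS means it is not. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	long ret = syscall(363, -1, (void *)0, 0, 0);

	if (ret < 0)
		printf("sendmmsg(363): %s\n", strerror(errno));
	return 0;
}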
arch/sh/oprofile/backtrace.c (-13)
···
 #include <asm/sections.h>
 #include <asm/stacktrace.h>
 
-static void backtrace_warning_symbol(void *data, char *msg,
-				     unsigned long symbol)
-{
-	/* Ignore warnings */
-}
-
-static void backtrace_warning(void *data, char *msg)
-{
-	/* Ignore warnings */
-}
-
 static int backtrace_stack(void *data, char *name)
 {
 	/* Yes, we want all stacks */
···
 }
 
 static struct stacktrace_ops backtrace_ops = {
-	.warning = backtrace_warning,
-	.warning_symbol = backtrace_warning_symbol,
 	.stack = backtrace_stack,
 	.address = backtrace_address,
 };
drivers/clocksource/sh_cmt.c (+17 -14)
···
 #include <linux/ioport.h>
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/pm_runtime.h>
 #include <linux/irq.h>
 #include <linux/err.h>
 #include <linux/clocksource.h>
···
 {
 	int ret;
 
-	/* enable clock */
+	/* wake up device and enable clock */
+	pm_runtime_get_sync(&p->pdev->dev);
 	ret = clk_enable(p->clk);
 	if (ret) {
 		dev_err(&p->pdev->dev, "cannot enable clock\n");
+		pm_runtime_put_sync(&p->pdev->dev);
 		return ret;
 	}
···
 	/* disable interrupts in CMT block */
 	sh_cmt_write(p, CMCSR, 0);
 
-	/* stop clock */
+	/* stop clock and mark device as idle */
 	clk_disable(p->clk);
+	pm_runtime_put_sync(&p->pdev->dev);
 }
 
 /* private flags */
···
 
 static int sh_cmt_clocksource_enable(struct clocksource *cs)
 {
+	int ret;
 	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
 
 	p->total_cycles = 0;
 
-	return sh_cmt_start(p, FLAG_CLOCKSOURCE);
+	ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
+	if (!ret)
+		__clocksource_updatefreq_hz(cs, p->rate);
+	return ret;
 }
 
 static void sh_cmt_clocksource_disable(struct clocksource *cs)
···
 	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
 	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 
-	/* clk_get_rate() needs an enabled clock */
-	clk_enable(p->clk);
-	p->rate = clk_get_rate(p->clk) / ((p->width == 16) ? 512 : 8);
-	clk_disable(p->clk);
-
-	/* TODO: calculate good shift from rate and counter bit width */
-	cs->shift = 0;
-	cs->mult = clocksource_hz2mult(p->rate, cs->shift);
-
 	dev_info(&p->pdev->dev, "used as clock source\n");
 
-	clocksource_register(cs);
-
+	/* Register with dummy 1 Hz value, gets updated in ->enable() */
+	clocksource_register_hz(cs, 1);
 	return 0;
 }
···
 
 	if (p) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
+		pm_runtime_enable(&pdev->dev);
 		return 0;
 	}
···
 		kfree(p);
 		platform_set_drvdata(pdev, NULL);
 	}
+
+	if (!is_early_platform_device(pdev))
+		pm_runtime_enable(&pdev->dev);
 	return ret;
 }
drivers/clocksource/sh_tmu.c (+18 -13)
···
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/pm_runtime.h>
 #include <linux/irq.h>
 #include <linux/err.h>
 #include <linux/clocksource.h>
···
 {
 	int ret;
 
-	/* enable clock */
+	/* wake up device and enable clock */
+	pm_runtime_get_sync(&p->pdev->dev);
 	ret = clk_enable(p->clk);
 	if (ret) {
 		dev_err(&p->pdev->dev, "cannot enable clock\n");
+		pm_runtime_put_sync(&p->pdev->dev);
 		return ret;
 	}
···
 	/* disable interrupts in TMU block */
 	sh_tmu_write(p, TCR, 0x0000);
 
-	/* stop clock */
+	/* stop clock and mark device as idle */
 	clk_disable(p->clk);
+	pm_runtime_put_sync(&p->pdev->dev);
 }
 
 static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
···
 static int sh_tmu_clocksource_enable(struct clocksource *cs)
 {
 	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+	int ret;
 
-	return sh_tmu_enable(p);
+	ret = sh_tmu_enable(p);
+	if (!ret)
+		__clocksource_updatefreq_hz(cs, p->rate);
+	return ret;
 }
 
 static void sh_tmu_clocksource_disable(struct clocksource *cs)
···
 	cs->mask = CLOCKSOURCE_MASK(32);
 	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 
-	/* clk_get_rate() needs an enabled clock */
-	clk_enable(p->clk);
-	/* channel will be configured at parent clock / 4 */
-	p->rate = clk_get_rate(p->clk) / 4;
-	clk_disable(p->clk);
-	/* TODO: calculate good shift from rate and counter bit width */
-	cs->shift = 10;
-	cs->mult = clocksource_hz2mult(p->rate, cs->shift);
-
 	dev_info(&p->pdev->dev, "used as clock source\n");
-	clocksource_register(cs);
+
+	/* Register with dummy 1 Hz value, gets updated in ->enable() */
+	clocksource_register_hz(cs, 1);
 	return 0;
 }
···
 
 	if (p) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
+		pm_runtime_enable(&pdev->dev);
 		return 0;
 	}
···
 		kfree(p);
 		platform_set_drvdata(pdev, NULL);
 	}
+
+	if (!is_early_platform_device(pdev))
+		pm_runtime_enable(&pdev->dev);
 	return ret;
 }
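
Both timer drivers drop the hand-rolled `mult`/`shift` setup for the same reason: with runtime PM, the input clock rate is only reliably known once the device is powered, i.e. from `->enable()`. Registering with a placeholder 1 Hz and then calling `__clocksource_updatefreq_hz()` lets the clocksource core derive the conversion factors from the real rate. The arithmetic being delegated, as a worked example (the rate value is assumed for illustration):

/* The deleted lines computed, by hand, mult/shift such that
 *	ns = (cycles * mult) >> shift
 * where clocksource_hz2mult(rate, shift) is roughly (10^9 << shift) / rate.
 * For an assumed rate of 1 MHz with shift = 10:
 *	mult = (10^9 << 10) / 10^6 = 1024000
 *	ns   = (cycles * 1024000) >> 10 = cycles * 1000	(1 us per cycle)
 * __clocksource_updatefreq_hz() now performs the equivalent computation
 * once the true rate is passed in from ->enable(). */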
drivers/dma/shdma.c (+144 -44)
···
 
 /*
  * Used for write-side mutual exclusion for the global device list,
- * read-side synchronization by way of RCU.
+ * read-side synchronization by way of RCU, and per-controller data.
  */
 static DEFINE_SPINLOCK(sh_dmae_lock);
 static LIST_HEAD(sh_dmae_devices);
···
  */
 static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
 {
-	unsigned short dmaor = dmaor_read(shdev);
+	unsigned short dmaor;
+	unsigned long flags;
 
+	spin_lock_irqsave(&sh_dmae_lock, flags);
+
+	dmaor = dmaor_read(shdev);
 	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
+
+	spin_unlock_irqrestore(&sh_dmae_lock, flags);
 }
 
 static int sh_dmae_rst(struct sh_dmae_device *shdev)
 {
 	unsigned short dmaor;
+	unsigned long flags;
 
-	sh_dmae_ctl_stop(shdev);
-	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;
+	spin_lock_irqsave(&sh_dmae_lock, flags);
 
-	dmaor_write(shdev, dmaor);
-	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
-		pr_warning("dma-sh: Can't initialize DMAOR.\n");
-		return -EINVAL;
+	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+
+	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
+
+	dmaor = dmaor_read(shdev);
+
+	spin_unlock_irqrestore(&sh_dmae_lock, flags);
+
+	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
+		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
+		return -EIO;
 	}
 	return 0;
 }
···
 
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
-	/* When DMA was working, can not set data to CHCR */
+	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
 	if (dmae_is_busy(sh_chan))
 		return -EBUSY;
···
 	LIST_HEAD(list);
 	int descs = sh_chan->descs_allocated;
 
+	/* Protect against ISR */
+	spin_lock_irq(&sh_chan->desc_lock);
 	dmae_halt(sh_chan);
+	spin_unlock_irq(&sh_chan->desc_lock);
+
+	/* Now no new interrupts will occur */
 
 	/* Prepared and not submitted descriptors can still be on the queue */
 	if (!list_empty(&sh_chan->ld_queue))
···
 		/* The caller is holding dma_list_mutex */
 		struct sh_dmae_slave *param = chan->private;
 		clear_bit(param->slave_id, sh_dmae_slave_used);
+		chan->private = NULL;
 	}
 
 	spin_lock_bh(&sh_chan->desc_lock);
···
 	if (!chan || !len)
 		return NULL;
 
-	chan->private = NULL;
-
 	sh_chan = to_sh_chan(chan);
 
 	sg_init_table(&sg, 1);
···
 	if (!chan)
 		return -EINVAL;
 
+	spin_lock_bh(&sh_chan->desc_lock);
 	dmae_halt(sh_chan);
 
-	spin_lock_bh(&sh_chan->desc_lock);
 	if (!list_empty(&sh_chan->ld_queue)) {
 		/* Record partial transfer */
 		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
···
 			list_move(&desc->node, &sh_chan->ld_free);
 		}
 	}
+
+	if (all && !callback)
+		/*
+		 * Terminating and the loop completed normally: forgive
+		 * uncompleted cookies
+		 */
+		sh_chan->completed_cookie = sh_chan->common.cookie;
+
 	spin_unlock_bh(&sh_chan->desc_lock);
 
 	if (callback)
···
 {
 	while (__ld_cleanup(sh_chan, all))
 		;
-
-	if (all)
-		/* Terminating - forgive uncompleted cookies */
-		sh_chan->completed_cookie = sh_chan->common.cookie;
 }
 
 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
···
 
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 
-	last_used = chan->cookie;
+	/* First read completed cookie to avoid a skew */
 	last_complete = sh_chan->completed_cookie;
+	rmb();
+	last_used = chan->cookie;
 	BUG_ON(last_complete < 0);
 	dma_set_tx_state(txstate, last_complete, last_used, 0);
···
 static irqreturn_t sh_dmae_interrupt(int irq, void *data)
 {
 	irqreturn_t ret = IRQ_NONE;
-	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
-	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+	struct sh_dmae_chan *sh_chan = data;
+	u32 chcr;
+
+	spin_lock(&sh_chan->desc_lock);
+
+	chcr = sh_dmae_readl(sh_chan, CHCR);
 
 	if (chcr & CHCR_TE) {
 		/* DMA stop */
···
 		tasklet_schedule(&sh_chan->tasklet);
 	}
 
+	spin_unlock(&sh_chan->desc_lock);
+
 	return ret;
 }
 
-static unsigned int sh_dmae_reset(struct sh_dmae_device *shdev)
+/* Called from error IRQ or NMI */
+static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 {
 	unsigned int handled = 0;
 	int i;
···
 	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
 		struct sh_dmae_chan *sh_chan = shdev->chan[i];
 		struct sh_desc *desc;
+		LIST_HEAD(dl);
 
 		if (!sh_chan)
 			continue;
 
+		spin_lock(&sh_chan->desc_lock);
+
 		/* Stop the channel */
 		dmae_halt(sh_chan);
 
+		list_splice_init(&sh_chan->ld_queue, &dl);
+
+		spin_unlock(&sh_chan->desc_lock);
+
 		/* Complete all */
-		list_for_each_entry(desc, &sh_chan->ld_queue, node) {
+		list_for_each_entry(desc, &dl, node) {
 			struct dma_async_tx_descriptor *tx = &desc->async_tx;
 			desc->mark = DESC_IDLE;
 			if (tx->callback)
 				tx->callback(tx->callback_param);
 		}
 
-		list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
+		spin_lock(&sh_chan->desc_lock);
+		list_splice(&dl, &sh_chan->ld_free);
+		spin_unlock(&sh_chan->desc_lock);
+
 		handled++;
 	}
···
 {
 	struct sh_dmae_device *shdev = data;
 
-	if (dmaor_read(shdev) & DMAOR_AE)
-		return IRQ_RETVAL(sh_dmae_reset(data));
-	else
+	if (!(dmaor_read(shdev) & DMAOR_AE))
 		return IRQ_NONE;
+
+	sh_dmae_reset(data);
+	return IRQ_HANDLED;
 }
 
 static void dmae_do_tasklet(unsigned long data)
···
 
 static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
 {
-	unsigned int handled;
-
 	/* Fast path out if NMIF is not asserted for this controller */
 	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
 		return false;
 
-	handled = sh_dmae_reset(shdev);
-	if (handled)
-		return true;
-
-	return false;
+	return sh_dmae_reset(shdev);
 }
 
 static int sh_dmae_nmi_handler(struct notifier_block *self,
···
 	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
 			(unsigned long)new_sh_chan);
 
-	/* Init the channel */
-	dmae_init(new_sh_chan);
-
 	spin_lock_init(&new_sh_chan->desc_lock);
 
 	/* Init descripter manage list */
···
 	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
 	unsigned long irqflags = IRQF_DISABLED,
 		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
-	unsigned long flags;
 	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
 	int err, i, irq_cnt = 0, irqres = 0;
 	struct sh_dmae_device *shdev;
···
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
 
-	spin_lock_irqsave(&sh_dmae_lock, flags);
+	spin_lock_irq(&sh_dmae_lock);
 	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
-	spin_unlock_irqrestore(&sh_dmae_lock, flags);
+	spin_unlock_irq(&sh_dmae_lock);
 
-	/* reset dma controller */
+	/* reset dma controller - only needed as a test */
 	err = sh_dmae_rst(shdev);
 	if (err)
 		goto rst_err;
···
 eirq_err:
 #endif
 rst_err:
-	spin_lock_irqsave(&sh_dmae_lock, flags);
+	spin_lock_irq(&sh_dmae_lock);
 	list_del_rcu(&shdev->node);
-	spin_unlock_irqrestore(&sh_dmae_lock, flags);
+	spin_unlock_irq(&sh_dmae_lock);
 
 	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
 	if (dmars)
 		iounmap(shdev->dmars);
 emapdmars:
 	iounmap(shdev->chan_reg);
+	synchronize_rcu();
 emapchan:
 	kfree(shdev);
 ealloc:
···
 {
 	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
 	struct resource *res;
-	unsigned long flags;
 	int errirq = platform_get_irq(pdev, 0);
 
 	dma_async_device_unregister(&shdev->common);
···
 	if (errirq > 0)
 		free_irq(errirq, shdev);
 
-	spin_lock_irqsave(&sh_dmae_lock, flags);
+	spin_lock_irq(&sh_dmae_lock);
 	list_del_rcu(&shdev->node);
-	spin_unlock_irqrestore(&sh_dmae_lock, flags);
+	spin_unlock_irq(&sh_dmae_lock);
 
 	/* channel data remove */
 	sh_dmae_chan_remove(shdev);
···
 		iounmap(shdev->dmars);
 	iounmap(shdev->chan_reg);
 
+	synchronize_rcu();
 	kfree(shdev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
···
 	sh_dmae_ctl_stop(shdev);
 }
 
+static int sh_dmae_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int sh_dmae_runtime_resume(struct device *dev)
+{
+	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+	return sh_dmae_rst(shdev);
+}
+
+#ifdef CONFIG_PM
+static int sh_dmae_suspend(struct device *dev)
+{
+	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < shdev->pdata->channel_num; i++) {
+		struct sh_dmae_chan *sh_chan = shdev->chan[i];
+		if (sh_chan->descs_allocated)
+			sh_chan->pm_error = pm_runtime_put_sync(dev);
+	}
+
+	return 0;
+}
+
+static int sh_dmae_resume(struct device *dev)
+{
+	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < shdev->pdata->channel_num; i++) {
+		struct sh_dmae_chan *sh_chan = shdev->chan[i];
+		struct sh_dmae_slave *param = sh_chan->common.private;
+
+		if (!sh_chan->descs_allocated)
+			continue;
+
+		if (!sh_chan->pm_error)
+			pm_runtime_get_sync(dev);
+
+		if (param) {
+			const struct sh_dmae_slave_config *cfg = param->config;
+			dmae_set_dmars(sh_chan, cfg->mid_rid);
+			dmae_set_chcr(sh_chan, cfg->chcr);
+		} else {
+			dmae_init(sh_chan);
+		}
+	}
+
+	return 0;
+}
+#else
+#define sh_dmae_suspend NULL
+#define sh_dmae_resume NULL
+#endif
+
+const struct dev_pm_ops sh_dmae_pm = {
+	.suspend		= sh_dmae_suspend,
+	.resume			= sh_dmae_resume,
+	.runtime_suspend	= sh_dmae_runtime_suspend,
+	.runtime_resume		= sh_dmae_runtime_resume,
+};
+
 static struct platform_driver sh_dmae_driver = {
 	.remove		= __exit_p(sh_dmae_remove),
 	.shutdown	= sh_dmae_shutdown,
 	.driver = {
 		.owner	= THIS_MODULE,
 		.name	= "sh-dma-engine",
+		.pm	= &sh_dmae_pm,
 	},
 };
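
The `synchronize_rcu()` calls added to the error and removal paths complete the RCU deletion pattern for the global `sh_dmae_devices` list, whose reader (the NMI notifier) walks it locklessly. The sequence in isolation:

/* Writer-side RCU deletion as applied above: unlink under the lock,
 * wait out all pre-existing readers, then free. */
static void example_remove(struct sh_dmae_device *shdev)
{
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);	/* readers may still hold the node */
	spin_unlock_irq(&sh_dmae_lock);

	synchronize_rcu();		/* every earlier rcu_read_lock() section has exited */
	kfree(shdev);			/* now safe to free */
}

/* Reader side (the NMI path), for reference:
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node)
 *		...
 *	rcu_read_unlock();
 */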
drivers/dma/shdma.h (+1)
···
 	int id;			/* Raw id of this channel */
 	u32 __iomem *base;
 	char dev_id[16];	/* unique name per DMAC of channel */
+	int pm_error;
 };
 
 struct sh_dmae_device {
drivers/i2c/busses/i2c-sh_mobile.c (+1)
···
 MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver");
 MODULE_AUTHOR("Magnus Damm");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-sh_mobile");
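
Platform-driver autoloading is keyed on the device name: registering a device called `i2c-sh_mobile` makes the driver core emit a `MODALIAS=platform:i2c-sh_mobile` uevent, which modprobe resolves against the alias added above. A hypothetical board-side counterpart, just to show which name must match:

/* Hypothetical board code; only .name is significant for the alias. */
static struct platform_device example_i2c_device = {
	.name	= "i2c-sh_mobile",
	.id	= 0,
};

static int __init example_board_init(void)
{
	return platform_device_register(&example_i2c_device);
}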
drivers/sh/clk/cpg.c (+1 -1)
···
 
 	/* Rebuild the frequency table */
 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
-			     table, &clk->arch_flags);
+			     table, NULL);
 
 	return 0;
 }
drivers/sh/intc/virq.c (+5)
···
 
 	irq_set_handler_data(irq, (void *)entry->handle);
 
+	/*
+	 * Set the virtual IRQ as non-threadable.
+	 */
+	irq_set_nothread(irq);
+
 	irq_set_chained_handler(entry->pirq, intc_virq_handler);
 	add_virq_to_pirq(entry->pirq, irq);
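
This pairs with the `IRQ_FORCED_THREADING` select in arch/sh/Kconfig above: once forced threading (the `threadirqs` boot option) is possible, a demux IRQ driven from a chained handler must stay in hard-IRQ context. Ordinary drivers can request the same exemption per handler; a sketch under that assumption:

/* Driver-side analogue (standard genirq flag, assumed applicable here):
 * IRQF_NO_THREAD keeps the handler in hard-IRQ context even when the
 * kernel is booted with threadirqs. */
static irqreturn_t example_demux_handler(int irq, void *dev_id)
{
	/* ... read the status register, dispatch sub-interrupts ... */
	return IRQ_HANDLED;
}

static int example_setup(unsigned int irq, void *dev_id)
{
	return request_irq(irq, example_demux_handler, IRQF_NO_THREAD,
			   "example-demux", dev_id);
}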
drivers/tty/serial/sh-sci.c (+15 -1)
···
 #include <linux/platform_device.h>
 #include <linux/serial_sci.h>
 #include <linux/notifier.h>
+#include <linux/pm_runtime.h>
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
 #include <linux/ctype.h>
···
 {
 	struct sci_port *port = (struct sci_port *)data;
 
+	if (port->enable)
+		port->enable(&port->port);
+
 	if (sci_rxd_in(&port->port) == 0) {
 		port->break_flag = 1;
 		sci_schedule_break_timer(port);
···
 		sci_schedule_break_timer(port);
 	} else
 		port->break_flag = 0;
+
+	if (port->disable)
+		port->disable(&port->port);
 }
 
 static int sci_handle_errors(struct uart_port *port)
···
 {
 	struct sci_port *sci_port = to_sci_port(port);
 
+	pm_runtime_get_sync(port->dev);
+
 	clk_enable(sci_port->iclk);
 	sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
 	clk_enable(sci_port->fclk);
···
 
 	clk_disable(sci_port->fclk);
 	clk_disable(sci_port->iclk);
+
+	pm_runtime_put_sync(port->dev);
 }
 
 static int sci_request_irq(struct sci_port *port)
···
 		sci_port->enable = sci_clk_enable;
 		sci_port->disable = sci_clk_disable;
 		port->dev = &dev->dev;
+
+		pm_runtime_enable(&dev->dev);
 	}
 
 	sci_port->break_timer.data = (unsigned long)sci_port;
···
 	 *
 	 * For the muxed case there's nothing more to do.
 	 */
-	port->irq = p->irqs[SCIx_TXI_IRQ];
+	port->irq = p->irqs[SCIx_RXI_IRQ];
 
 	if (p->dma_dev)
 		dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
···
 	clk_put(port->iclk);
 	clk_put(port->fclk);
 
+	pm_runtime_disable(&dev->dev);
 	return 0;
 }
drivers/tty/serial/sh-sci.h (+8 -8)
···
 #elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
       defined(CONFIG_CPU_SUBTYPE_SH7720) || \
       defined(CONFIG_CPU_SUBTYPE_SH7721) || \
-      defined(CONFIG_ARCH_SH73A0) || \
-      defined(CONFIG_ARCH_SH7367) || \
-      defined(CONFIG_ARCH_SH7377)
+      defined(CONFIG_ARCH_SH7367)
 #define SCIF_FNS(name, scif_offset, scif_size) \
   CPU_SCIF_FNS(name, scif_offset, scif_size)
-#elif defined(CONFIG_ARCH_SH7372)
+#elif defined(CONFIG_ARCH_SH7377) || \
+      defined(CONFIG_ARCH_SH7372) || \
+      defined(CONFIG_ARCH_SH73A0)
 #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) \
   CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size)
 #define SCIF_FNS(name, scif_offset, scif_size) \
···
 #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
     defined(CONFIG_CPU_SUBTYPE_SH7720) || \
     defined(CONFIG_CPU_SUBTYPE_SH7721) || \
-    defined(CONFIG_ARCH_SH73A0) || \
-    defined(CONFIG_ARCH_SH7367) || \
-    defined(CONFIG_ARCH_SH7377)
+    defined(CONFIG_ARCH_SH7367)
 
 SCIF_FNS(SCSMR,  0x00, 16)
 SCIF_FNS(SCBRR,  0x04,  8)
···
 SCIF_FNS(SCxTDR, 0x20,  8)
 SCIF_FNS(SCxRDR, 0x24,  8)
 SCIF_FNS(SCLSR,  0x00,  0)
-#elif defined(CONFIG_ARCH_SH7372)
+#elif defined(CONFIG_ARCH_SH7377) || \
+      defined(CONFIG_ARCH_SH7372) || \
+      defined(CONFIG_ARCH_SH73A0)
 SCIF_FNS(SCSMR,  0x00, 16)
 SCIF_FNS(SCBRR,  0x04,  8)
 SCIF_FNS(SCSCR,  0x08, 16)