Documentation/accounting/getdelays.c (+1)

Documentation/kernel-parameters.txt (-5)
@@ -2175,11 +2175,6 @@
 reset_devices	[KNL] Force drivers to reset the underlying device
 		during initialization.
 
-resource_alloc_from_bottom
-		Allocate new resources from the beginning of available
-		space, not the end. If you need to use this, please
-		report a bug.
-
 resume=		[SWSUSP]
 		Specify the partition device for software suspend
 

Documentation/scsi/scsi_mid_low_api.txt (+30 -27)
@@ -1044,9 +1044,9 @@
 
 
 /**
- * queuecommand - queue scsi command, invoke 'done' on completion
+ * queuecommand - queue scsi command, invoke scp->scsi_done on completion
+ * @shost: pointer to the scsi host object
  * @scp: pointer to scsi command object
- * @done: function pointer to be invoked on completion
  *
  * Returns 0 on success.
  *
@@ -1074,42 +1074,45 @@
  *
  * Other types of errors that are detected immediately may be
  * flagged by setting scp->result to an appropriate value,
- * invoking the 'done' callback, and then returning 0 from this
- * function. If the command is not performed immediately (and the
- * LLD is starting (or will start) the given command) then this
- * function should place 0 in scp->result and return 0.
+ * invoking the scp->scsi_done callback, and then returning 0
+ * from this function. If the command is not performed
+ * immediately (and the LLD is starting (or will start) the given
+ * command) then this function should place 0 in scp->result and
+ * return 0.
  *
  * Command ownership. If the driver returns zero, it owns the
- * command and must take responsibility for ensuring the 'done'
- * callback is executed. Note: the driver may call done before
- * returning zero, but after it has called done, it may not
- * return any value other than zero. If the driver makes a
- * non-zero return, it must not execute the command's done
- * callback at any time.
+ * command and must take responsibility for ensuring the
+ * scp->scsi_done callback is executed. Note: the driver may
+ * call scp->scsi_done before returning zero, but after it has
+ * called scp->scsi_done, it may not return any value other than
+ * zero. If the driver makes a non-zero return, it must not
+ * execute the command's scsi_done callback at any time.
  *
- * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
- * and is expected to be held on return.
+ * Locks: up to and including 2.6.36, struct Scsi_Host::host_lock
+ * held on entry (with "irqsave") and is expected to be
+ * held on return. From 2.6.37 onwards, queuecommand is
+ * called without any locks held.
  *
  * Calling context: in interrupt (soft irq) or process context
  *
- * Notes: This function should be relatively fast. Normally it will
- * not wait for IO to complete. Hence the 'done' callback is invoked
- * (often directly from an interrupt service routine) some time after
- * this function has returned. In some cases (e.g. pseudo adapter
- * drivers that manufacture the response to a SCSI INQUIRY)
- * the 'done' callback may be invoked before this function returns.
- * If the 'done' callback is not invoked within a certain period
- * the SCSI mid level will commence error processing.
- * If a status of CHECK CONDITION is placed in "result" when the
- * 'done' callback is invoked, then the LLD driver should
- * perform autosense and fill in the struct scsi_cmnd::sense_buffer
+ * Notes: This function should be relatively fast. Normally it
+ * will not wait for IO to complete. Hence the scp->scsi_done
+ * callback is invoked (often directly from an interrupt service
+ * routine) some time after this function has returned. In some
+ * cases (e.g. pseudo adapter drivers that manufacture the
+ * response to a SCSI INQUIRY) the scp->scsi_done callback may be
+ * invoked before this function returns. If the scp->scsi_done
+ * callback is not invoked within a certain period the SCSI mid
+ * level will commence error processing. If a status of CHECK
+ * CONDITION is placed in "result" when the scp->scsi_done
+ * callback is invoked, then the LLD driver should perform
+ * autosense and fill in the struct scsi_cmnd::sense_buffer
  * array. The scsi_cmnd::sense_buffer array is zeroed prior to
  * the mid level queuing a command to an LLD.
  *
 * Defined in: LLD
 **/
-int queuecommand(struct scsi_cmnd * scp,
-                 void (*done)(struct scsi_cmnd *))
+int queuecommand(struct Scsi_Host *shost, struct scsi_cmnd * scp)
 
 
 /**
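For orientation, this is roughly what an LLD's queuecommand looks like under the new convention documented above: no host_lock held on entry, the host pointer passed explicitly, completion via scp->scsi_done. A minimal sketch only; the my_host/my_hw_*() names are hypothetical placeholders, not part of the patch:

    static int my_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scp)
    {
            struct my_host *hw = shost_priv(shost);  /* LLD-private data */

            /* an immediately-detected error: set result, complete, return 0 */
            if (!my_hw_ready(hw)) {                  /* hypothetical helper */
                    scp->result = DID_NO_CONNECT << 16;
                    scp->scsi_done(scp);
                    return 0;
            }

            scp->result = 0;
            if (my_hw_submit(hw, scp))               /* hypothetical helper */
                    return SCSI_MLQUEUE_HOST_BUSY;   /* non-zero: never call scsi_done */

            /* success: we own scp until our IRQ handler calls scp->scsi_done(scp) */
            return 0;
    }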
Documentation/trace/postprocess/trace-vmscan-postprocess.pl (+10 -1)
@@ -373,9 +373,18 @@
 			print "      $regex_lru_isolate/o\n";
 			next;
 		}
+		my $isolate_mode = $1;
 		my $nr_scanned = $4;
 		my $nr_contig_dirty = $7;
-		$perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
+
+		# To closer match vmstat scanning statistics, only count isolate_both
+		# and isolate_inactive as scanning. isolate_active is rotation
+		# isolate_inactive == 0
+		# isolate_active == 1
+		# isolate_both == 2
+		if ($isolate_mode != 1) {
+			$perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
+		}
 		$perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
 	} elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
 		$details = $5;

MAINTAINERS (+2 -2)
@@ -405,7 +405,7 @@
 F:	drivers/usb/gadget/amd5536udc.*
 
 AMD GEODE PROCESSOR/CHIPSET SUPPORT
-P:	Jordan Crouse
+P:	Andres Salomon <dilinger@queued.net>
 L:	linux-geode@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
 S:	Supported
@@ -4605,7 +4605,7 @@
 F:	include/pcmcia/
 
 PCNET32 NETWORK DRIVER
-M:	Don Fry <pcnet32@verizon.net>
+M:	Don Fry <pcnet32@frontier.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/pcnet32.c

Makefile (+1 -1)

arch/arm/common/it8152.c (+1)

arch/arm/include/asm/hardware/it8152.h (+1)

arch/arm/mach-at91/include/mach/at91_mci.h (+2)
@@ -74,6 +74,8 @@
 #define AT91_MCI_TRTYP_BLOCK	(0 << 19)
 #define AT91_MCI_TRTYP_MULTIPLE	(1 << 19)
 #define AT91_MCI_TRTYP_STREAM	(2 << 19)
+#define AT91_MCI_TRTYP_SDIO_BYTE	(4 << 19)
+#define AT91_MCI_TRTYP_SDIO_BLOCK	(5 << 19)
 
 #define AT91_MCI_BLKR		0x18	/* Block Register */
 #define AT91_MCI_BLKR_BCNT(n)	((0xffff & (n)) << 0)	/* Block count */

arch/arm/mach-ixp4xx/common-pci.c (+1 -1)

arch/arm/mach-pxa/sleep.S (+2 -2)
@@ -353,8 +353,8 @@
 
 	@ Let us ensure we jump to resume_after_mmu only when the mcr above
 	@ actually took effect. They call it the "cpwait" operation.
-	mrc	p15, 0, r1, c2, c0, 0		@ queue a dependency on CP15
-	sub	pc, r2, r1, lsr #32		@ jump to virtual addr
+	mrc	p15, 0, r0, c2, c0, 0		@ queue a dependency on CP15
+	sub	pc, r2, r0, lsr #32		@ jump to virtual addr
 	nop
 	nop
 	nop

arch/arm/mach-s3c2412/Kconfig (+7)
@@ -28,8 +28,15 @@
 
 config S3C2412_PM
 	bool
+	select S3C2412_PM_SLEEP
 	help
 	  Internal config node to apply S3C2412 power management
+
+config S3C2412_PM_SLEEP
+	bool
+	help
+	  Internal config node to apply sleep for S3C2412 power management.
+	  Can be selected by another SoCs with similar sleep procedure.
 
 # Note, the S3C2412 IOtiming support is in plat-s3c24xx
 

arch/arm/mach-s3c2412/Makefile (+2 -1)
@@ -14,7 +14,8 @@
 obj-$(CONFIG_CPU_S3C2412)	+= clock.o
 obj-$(CONFIG_CPU_S3C2412)	+= gpio.o
 obj-$(CONFIG_S3C2412_DMA)	+= dma.o
-obj-$(CONFIG_S3C2412_PM)	+= pm.o sleep.o
+obj-$(CONFIG_S3C2412_PM)	+= pm.o
+obj-$(CONFIG_S3C2412_PM_SLEEP)	+= sleep.o
 obj-$(CONFIG_S3C2412_CPUFREQ)	+= cpu-freq.o
 
 # Machine support

arch/arm/mach-s3c2416/Kconfig (+1)

arch/arm/mach-s5pv210/mach-aquila.c (+6)
@@ -378,6 +378,12 @@
 static struct max8998_platform_data aquila_max8998_pdata = {
 	.num_regulators	= ARRAY_SIZE(aquila_regulators),
 	.regulators	= aquila_regulators,
+	.buck1_set1	= S5PV210_GPH0(3),
+	.buck1_set2	= S5PV210_GPH0(4),
+	.buck2_set3	= S5PV210_GPH0(5),
+	.buck1_max_voltage1 = 1200000,
+	.buck1_max_voltage2 = 1200000,
+	.buck2_max_voltage = 1200000,
 };
 #endif
 

arch/arm/mach-s5pv210/mach-goni.c (+6)
@@ -518,6 +518,12 @@
 static struct max8998_platform_data goni_max8998_pdata = {
 	.num_regulators	= ARRAY_SIZE(goni_regulators),
 	.regulators	= goni_regulators,
+	.buck1_set1	= S5PV210_GPH0(3),
+	.buck1_set2	= S5PV210_GPH0(4),
+	.buck2_set3	= S5PV210_GPH0(5),
+	.buck1_max_voltage1 = 1200000,
+	.buck1_max_voltage2 = 1200000,
+	.buck2_max_voltage = 1200000,
 };
 #endif
 

arch/arm/mach-shmobile/include/mach/entry-macro.S (+26 -4)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (C) 2010 Magnus Damm
  * Copyright (C) 2008 Renesas Solutions Corp.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -15,24 +14,45 @@
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
-#include <mach/hardware.h>
 #include <mach/irqs.h>
+
+#define INTCA_BASE	0xe6980000
+#define INTFLGA_OFFS	0x00000018 /* accept pending interrupt */
+#define INTEVTA_OFFS	0x00000020 /* vector number of accepted interrupt */
+#define INTLVLA_OFFS	0x00000030 /* priority level of accepted interrupt */
+#define INTLVLB_OFFS	0x00000034 /* previous priority level */
 
 	.macro	disable_fiq
 	.endm
 
 	.macro	get_irqnr_preamble, base, tmp
-	ldr	\base, =INTFLGA
+	ldr	\base, =INTCA_BASE
 	.endm
 
 	.macro	arch_ret_to_user, tmp1, tmp2
 	.endm
 
 	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-	ldr	\irqnr, [\base]
+	/* The single INTFLGA read access below results in the following:
+	 *
	 * 1. INTLVLB is updated with old priority value from INTLVLA
	 * 2. Highest priority interrupt is accepted
	 * 3. INTLVLA is updated to contain priority of accepted interrupt
	 * 4. Accepted interrupt vector is stored in INTFLGA and INTEVTA
+	 */
+	ldr	\irqnr, [\base, #INTFLGA_OFFS]
+
+	/* Restore INTLVLA with the value saved in INTLVLB.
+	 * This is required to support interrupt priorities properly.
+	 */
+	ldrb	\tmp, [\base, #INTLVLB_OFFS]
+	strb	\tmp, [\base, #INTLVLA_OFFS]
+
+	/* Handle invalid vector number case */
 	cmp	\irqnr, #0
 	beq	1000f
-	/* intevt to irq number */
+
+	/* Convert vector to irq number, same as the evt2irq() macro */
 	lsr	\irqnr, \irqnr, #0x5
 	subs	\irqnr, \irqnr, #16
 

arch/arm/mach-shmobile/include/mach/vmalloc.h (+1 -1)

arch/arm/plat-s3c24xx/Kconfig (+1 -1)

arch/mips/mm/sc-mips.c (+4)
@@ -68,6 +68,9 @@
  */
 static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
 {
+	unsigned int config2 = read_c0_config2();
+	unsigned int tmp;
+
 	/* Check the bypass bit (L2B) */
 	switch (c->cputype) {
 	case CPU_34K:
@@ -86,6 +83,7 @@
 		c->scache.linesz = 2 << tmp;
 	else
 		return 0;
+	return 1;
 }
 
 static inline int __init mips_sc_probe(void)

arch/mn10300/kernel/time.c (+3 -7)
@@ -40,21 +40,17 @@
 		unsigned long long ll;
 		unsigned l[2];
 	} tsc64, result;
-	unsigned long tsc, tmp;
+	unsigned long tmp;
 	unsigned product[3]; /* 96-bit intermediate value */
 
 	/* cnt32_to_63() is not safe with preemption */
 	preempt_disable();
 
-	/* read the TSC value
-	 */
-	tsc = get_cycles();
-
-	/* expand to 64-bits.
+	/* expand the tsc to 64-bits.
 	 * - sched_clock() must be called once a minute or better or the
 	 *   following will go horribly wrong - see cnt32_to_63()
 	 */
-	tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL;
+	tsc64.ll = cnt32_to_63(get_cycles()) & 0x7fffffffffffffffULL;
 
 	preempt_enable();
 

arch/powerpc/platforms/52xx/mpc52xx_gpt.c (+1)

arch/sh/boards/mach-se/7206/irq.c (+1 -1)
@@ -140,7 +140,7 @@
 	make_se7206_irq(IRQ1_IRQ); /* ATA */
 	make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
 
-	__raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR); /* ICR1 */
+	__raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
 
 	/* FPGA System register setup*/
 	__raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */

arch/sh/kernel/cpu/sh2a/clock-sh7201.c (+1 -1)

arch/sh/kernel/cpu/sh4/clock-sh4-202.c (+1 -2)

arch/tile/include/asm/signal.h (+1 -1)
@@ -25,7 +25,7 @@
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 struct pt_regs;
-int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
+int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
 int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
 void do_signal(struct pt_regs *regs);
 #endif

arch/tile/kernel/compat_signal.c (+3 -3)
@@ -290,12 +290,12 @@
 	return ret;
 }
 
+/* The assembly shim for this function arranges to ignore the return value. */
 long compat_sys_rt_sigreturn(struct pt_regs *regs)
 {
 	struct compat_rt_sigframe __user *frame =
 		(struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
 	sigset_t set;
-	long r0;
 
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
@@ -308,13 +308,13 @@
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 
 	if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
 		goto badframe;
 
-	return r0;
+	return 0;
 
 badframe:
 	force_sig(SIGSEGV, current);

arch/tile/kernel/intvec_32.S (+21 -3)
@@ -1342,8 +1342,8 @@
 	lw      r20, r20
 
 	/* Jump to syscall handler. */
-	jalr    r20; .Lhandle_syscall_link:
-	FEEDBACK_REENTER(handle_syscall)
+	jalr    r20
+.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
 
 	/*
 	 * Write our r0 onto the stack so it gets restored instead
@@ -1351,6 +1351,9 @@
 	 */
 	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
 	sw      r29, r0
+
+.Lsyscall_sigreturn_skip:
+	FEEDBACK_REENTER(handle_syscall)
 
 	/* Do syscall trace again, if requested. */
 	lw	r30, r31
@@ -1539,9 +1536,24 @@
 	};                                      \
 	STD_ENDPROC(_##x)
 
+/*
+ * Special-case sigreturn to not write r0 to the stack on return.
+ * This is technically more efficient, but it also avoids difficulties
+ * in the 64-bit OS when handling 32-bit compat code, since we must not
+ * sign-extend r0 for the sigreturn return-value case.
+ */
+#define PTREGS_SYSCALL_SIGRETURN(x, reg)        \
+	STD_ENTRY(_##x);                        \
+	addli   lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
+	{                                       \
+	 PTREGS_PTR(reg, PTREGS_OFFSET_BASE);   \
+	 j      x                               \
+	};                                      \
+	STD_ENDPROC(_##x)
+
 PTREGS_SYSCALL(sys_execve, r3)
 PTREGS_SYSCALL(sys_sigaltstack, r2)
-PTREGS_SYSCALL(sys_rt_sigreturn, r0)
+PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
 PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)
 
 /* Save additional callee-saves to pt_regs, put address in r4 and jump. */

arch/tile/kernel/process.c (+8)
@@ -212,6 +212,13 @@
 		childregs->sp = sp;  /* override with new user stack pointer */
 
 	/*
+	 * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
+	 * which is passed in as arg #5 to sys_clone().
+	 */
+	if (clone_flags & CLONE_SETTLS)
+		childregs->tp = regs->regs[4];
+
+	/*
 	 * Copy the callee-saved registers from the passed pt_regs struct
 	 * into the context-switch callee-saved registers area.
 	 * This way when we start the interrupt-return sequence, the
@@ -546,6 +539,7 @@
 	return __switch_to(prev, next, next_current_ksp0(next));
 }
 
+/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 		void __user *, parent_tidptr, void __user *, child_tidptr,
 		struct pt_regs *, regs)

arch/tile/kernel/signal.c (+4 -6)
@@ -52,7 +52,7 @@
  */
 
 int restore_sigcontext(struct pt_regs *regs,
-		       struct sigcontext __user *sc, long *pr0)
+		       struct sigcontext __user *sc)
 {
 	int err = 0;
 	int i;
@@ -75,17 +75,15 @@
 
 	regs->faultnum = INT_SWINT_1_SIGRETURN;
 
-	err |= __get_user(*pr0, &sc->gregs[0]);
 	return err;
 }
 
-/* sigreturn() returns long since it restores r0 in the interrupted code. */
+/* The assembly shim for this function arranges to ignore the return value. */
 SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
 {
 	struct rt_sigframe __user *frame =
 		(struct rt_sigframe __user *)(regs->sp);
 	sigset_t set;
-	long r0;
 
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
@@ -96,13 +98,13 @@
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 
 	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
 		goto badframe;
 
-	return r0;
+	return 0;
 
 badframe:
 	force_sig(SIGSEGV, current);

arch/x86/boot/compressed/misc.c (+1 -1)
@@ -355,7 +355,7 @@
 	if (heap > 0x3fffffffffffUL)
 		error("Destination address too large");
 #else
-	if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff))
+	if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
 		error("Destination address too large");
 #endif
 #ifndef CONFIG_RELOCATABLE
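Worked out under the default 32-bit split (__PAGE_OFFSET = 0xC0000000 — an assumption here, since the value is config-dependent), the new constant raises the allowed destination ceiling from 512 MB to 896 MB, which lines up with the 64-bit CRASH_KERNEL_ADDR_MAX added to setup.c further down. A standalone check of the arithmetic:

    #include <stdio.h>
    #define __PAGE_OFFSET 0xC0000000UL	/* assumed; default 3G/1G split */

    int main(void)
    {
            unsigned long old_max = (-__PAGE_OFFSET - (512 << 20) - 1) & 0x7fffffff;
            unsigned long new_max = (-__PAGE_OFFSET - (128 << 20) - 1) & 0x7fffffff;

            printf("old ceiling: %#lx (%lu MB)\n", old_max, (old_max + 1) >> 20);
            printf("new ceiling: %#lx (%lu MB)\n", new_max, (new_max + 1) >> 20);
            return 0;	/* prints 0x1fffffff (512 MB) and 0x37ffffff (896 MB) */
    }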
arch/x86/include/asm/e820.h (+3)

arch/x86/kernel/Makefile (+1)

arch/x86/kernel/apic/apic.c (+8)
@@ -1389,6 +1389,14 @@
 
 	setup_apic_nmi_watchdog(NULL);
 	apic_pm_activate();
+
+	/*
+	 * Now that local APIC setup is completed for BP, configure the fault
+	 * handling for interrupt remapping.
+	 */
+	if (!smp_processor_id() && intr_remapping_enabled)
+		enable_drhd_fault_handling();
+
 }
 
 #ifdef CONFIG_X86_X2APIC

arch/x86/kernel/apic/io_apic.c (+2 -2)
@@ -2430,13 +2430,12 @@
 {
 	struct irq_cfg *cfg = data->chip_data;
 	int i, do_unmask_irq = 0, irq = data->irq;
-	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long v;
 
 	irq_complete_move(cfg);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	/* If we are moving the irq we need to mask it */
-	if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
+	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
 		do_unmask_irq = 1;
 		mask_ioapic(cfg);
 	}
@@ -3412,6 +3413,7 @@
 	msg.data |= MSI_DATA_VECTOR(cfg->vector);
 	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+	msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
 
 	dmar_msi_write(irq, &msg);
 

arch/x86/kernel/apic/probe_64.c (-7)
@@ -79,13 +79,6 @@
 		/* need to update phys_pkg_id */
 		apic->phys_pkg_id = apicid_phys_pkg_id;
 	}
-
-	/*
-	 * Now that apic routing model is selected, configure the
-	 * fault handling for intr remapping.
-	 */
-	if (intr_remapping_enabled)
-		enable_drhd_fault_handling();
 }
 
 /* Same for both flat and physical. */

arch/x86/kernel/head_32.S (+7 -5)
@@ -60,16 +60,18 @@
 #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
 #endif
 
+/* Number of possible pages in the lowmem region */
+LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
+
 /* Enough space to fit pagetables for the low memory linear map */
-MAPPING_BEYOND_END = \
-	PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
+MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
 
 /*
  * Worst-case size of the kernel mapping we need to make:
- * the worst-case size of the kernel itself, plus the extra we need
- * to map for the linear map.
+ * a relocatable kernel can live anywhere in lowmem, so we need to be able
+ * to map all of lowmem.
 */
-KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT
+KERNEL_PAGES = LOWMEM_PAGES
 
 INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
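To make the reserved size concrete, here is the same arithmetic as a standalone program, assuming the default 3G/1G split and non-PAE paging (PTRS_PER_PGD = 1024, PAGE_SHIFT = 12); PAE uses a different divisor, so treat the numbers as illustrative only:

    #include <stdio.h>

    #define PAGE_OFFSET	0xC0000000UL	/* assumed */
    #define PAGE_SHIFT	12
    #define PAGE_SIZE	(1UL << PAGE_SHIFT)
    #define PTRS_PER_PGD	1024		/* non-PAE */
    #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)

    int main(void)
    {
            /* 1<<32 would overflow a 32-bit constant in C, so use 64-bit */
            unsigned long lowmem_pages = (0x100000000ULL - PAGE_OFFSET) >> PAGE_SHIFT;
            unsigned long init_map = PAGE_TABLE_SIZE(lowmem_pages) * PAGE_SIZE;

            printf("LOWMEM_PAGES  = %lu\n", lowmem_pages);	/* 262144 */
            printf("INIT_MAP_SIZE = %lu KB\n", init_map >> 10);	/* 1024 KB */
            return 0;
    }

Under those assumptions, mapping all of lowmem costs 256 page-table pages (1 MB of brk) instead of only covering KERNEL_IMAGE_SIZE, which is what lets a relocatable kernel land anywhere in lowmem.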
arch/x86/kernel/hpet.c (+16 -10)
@@ -27,6 +27,9 @@
 #define HPET_DEV_FSB_CAP		0x1000
 #define HPET_DEV_PERI_CAP		0x2000
 
+#define HPET_MIN_CYCLES			128
+#define HPET_MIN_PROG_DELTA		(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
+
 #define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
 
 /*
@@ -302,8 +299,9 @@
 	/* Calculate the min / max delta */
 	hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
 							   &hpet_clockevent);
-	/* 5 usec minimum reprogramming delta. */
-	hpet_clockevent.min_delta_ns = 5000;
+	/* Setup minimum reprogramming delta. */
+	hpet_clockevent.min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA,
+							   &hpet_clockevent);
 
 	/*
 	 * Start hpet with the boot cpu mask and make it
@@ -397,22 +393,24 @@
 	 * the wraparound into account) nor a simple count down event
 	 * mode. Further the write to the comparator register is
 	 * delayed internally up to two HPET clock cycles in certain
-	 * chipsets (ATI, ICH9,10). We worked around that by reading
-	 * back the compare register, but that required another
-	 * workaround for ICH9,10 chips where the first readout after
-	 * write can return the old stale value. We already have a
-	 * minimum delta of 5us enforced, but a NMI or SMI hitting
+	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
+	 * longer delays. We worked around that by reading back the
+	 * compare register, but that required another workaround for
+	 * ICH9,10 chips where the first readout after write can
+	 * return the old stale value. We already had a minimum
+	 * programming delta of 5us enforced, but a NMI or SMI hitting
 	 * between the counter readout and the comparator write can
 	 * move us behind that point easily. Now instead of reading
 	 * the compare register back several times, we make the ETIME
 	 * decision based on the following: Return ETIME if the
-	 * counter value after the write is less than 8 HPET cycles
+	 * counter value after the write is less than HPET_MIN_CYCLES
 	 * away from the event or if the counter is already ahead of
-	 * the event.
+	 * the event. The minimum programming delta for the generic
+	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
 	 */
 	res = (s32)(cnt - hpet_readl(HPET_COUNTER));
 
-	return res < 8 ? -ETIME : 0;
+	return res < HPET_MIN_CYCLES ? -ETIME : 0;
 }
 
 static void hpet_legacy_set_mode(enum clock_event_mode mode,
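For scale: assuming the common 14.31818 MHz HPET clock (the real conversion is done by clockevent_delta2ns() against the calibrated frequency), the new cycle-based constants translate as follows:

    #include <stdio.h>

    int main(void)
    {
            const double hpet_hz = 14318180.0;	/* assumed clock rate */
            const int min_cycles = 128;		/* HPET_MIN_CYCLES */
            const int min_prog = min_cycles + (min_cycles >> 1);	/* 192 */

            printf("ETIME window : %.0f ns\n", min_cycles * 1e9 / hpet_hz); /* ~8940 */
            printf("min_delta_ns : %.0f ns\n", min_prog * 1e9 / hpet_hz);   /* ~13410 */
            return 0;
    }

So on such a clock the enforced reprogramming delta grows from the old flat 5 us to roughly 13.4 us, of which the ETIME check covers about 8.9 us.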
arch/x86/kernel/microcode_intel.c (+5 -11)
@@ -364,8 +364,7 @@
 
 		/* For performance reasons, reuse mc area when possible */
 		if (!mc || mc_size > curr_mc_size) {
-			if (mc)
-				vfree(mc);
+			vfree(mc);
 			mc = vmalloc(mc_size);
 			if (!mc)
 				break;
@@ -373,13 +374,11 @@
 
 		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
 		    microcode_sanity_check(mc) < 0) {
-			vfree(mc);
 			break;
 		}
 
 		if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) {
-			if (new_mc)
-				vfree(new_mc);
+			vfree(new_mc);
 			new_rev = mc_header.rev;
 			new_mc  = mc;
 			mc = NULL;	/* trigger new vmalloc */
@@ -387,12 +390,10 @@
 		leftover  -= mc_size;
 	}
 
-	if (mc)
-		vfree(mc);
+	vfree(mc);
 
 	if (leftover) {
-		if (new_mc)
-			vfree(new_mc);
+		vfree(new_mc);
 		state = UCODE_ERROR;
 		goto out;
 	}
@@ -400,8 +405,7 @@
 		goto out;
 	}
 
-	if (uci->mc)
-		vfree(uci->mc);
+	vfree(uci->mc);
 	uci->mc = (struct microcode_intel *)new_mc;
 
 	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",

arch/x86/kernel/resource.c (new file, +48)
@@ -0,0 +1,48 @@
+#include <linux/ioport.h>
+#include <asm/e820.h>
+
+static void resource_clip(struct resource *res, resource_size_t start,
+			  resource_size_t end)
+{
+	resource_size_t low = 0, high = 0;
+
+	if (res->end < start || res->start > end)
+		return;		/* no conflict */
+
+	if (res->start < start)
+		low = start - res->start;
+
+	if (res->end > end)
+		high = res->end - end;
+
+	/* Keep the area above or below the conflict, whichever is larger */
+	if (low > high)
+		res->end = start - 1;
+	else
+		res->start = end + 1;
+}
+
+static void remove_e820_regions(struct resource *avail)
+{
+	int i;
+	struct e820entry *entry;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		entry = &e820.map[i];
+
+		resource_clip(avail, entry->addr,
+			      entry->addr + entry->size - 1);
+	}
+}
+
+void arch_remove_reservations(struct resource *avail)
+{
+	/* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */
+	if (avail->flags & IORESOURCE_MEM) {
+		if (avail->start < BIOS_END)
+			avail->start = BIOS_END;
+		resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
+
+		remove_e820_regions(avail);
+	}
+}
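The clipping rule is easy to exercise on its own: when a reserved range splits an available window, whichever unreserved side is larger survives. A standalone sketch with simplified types (the addresses below are made up for illustration):

    #include <stdio.h>

    struct res { unsigned long start, end; };

    static void clip(struct res *r, unsigned long start, unsigned long end)
    {
            unsigned long low = 0, high = 0;

            if (r->end < start || r->start > end)
                    return;			/* no conflict */
            if (r->start < start)
                    low = start - r->start;	/* room below the conflict */
            if (r->end > end)
                    high = r->end - end;	/* room above the conflict */
            if (low > high)
                    r->end = start - 1;		/* keep the lower side */
            else
                    r->start = end + 1;		/* keep the upper side */
    }

    int main(void)
    {
            struct res avail = { 0xa0000000UL, 0xbfffffffUL };

            clip(&avail, 0xb0000000UL, 0xb00fffffUL);	/* reserved 1 MB chunk */
            printf("avail: %#lx-%#lx\n", avail.start, avail.end);
            return 0;	/* prints 0xa0000000-0xafffffff: lower side kept */
    }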
arch/x86/kernel/setup.c (+14 -4)
@@ -501,7 +501,18 @@
 	return total << PAGE_SHIFT;
 }
 
-#define DEFAULT_BZIMAGE_ADDR_MAX 0x37FFFFFF
+/*
+ * Keep the crash kernel below this limit. On 32 bits earlier kernels
+ * would limit the kernel to the low 512 MiB due to mapping restrictions.
+ * On 64 bits, kexec-tools currently limits us to 896 MiB; increase this
+ * limit once kexec-tools are fixed.
+ */
+#ifdef CONFIG_X86_32
+# define CRASH_KERNEL_ADDR_MAX	(512 << 20)
+#else
+# define CRASH_KERNEL_ADDR_MAX	(896 << 20)
+#endif
+
 static void __init reserve_crashkernel(void)
 {
 	unsigned long long total_mem;
@@ -531,10 +520,10 @@
 	const unsigned long long alignment = 16<<20;	/* 16M */
 
 	/*
-	 *  kexec want bzImage is below DEFAULT_BZIMAGE_ADDR_MAX
+	 *  kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
 	 */
 	crash_base = memblock_find_in_range(alignment,
-			       DEFAULT_BZIMAGE_ADDR_MAX, crash_size, alignment);
+			       CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
 
 	if (crash_base == MEMBLOCK_ERROR) {
 		pr_info("crashkernel reservation failed - No suitable area found.\n");
@@ -780,7 +769,6 @@
 
 	x86_init.oem.arch_setup();
 
-	resource_alloc_from_bottom = 0;
 	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
 	setup_memory_map();
 	parse_setup_data();

arch/x86/kernel/xsave.c (+2 -1)
@@ -394,7 +394,8 @@
 	 * Setup init_xstate_buf to represent the init state of
 	 * all the features managed by the xsave
 	 */
-	init_xstate_buf = alloc_bootmem(xstate_size);
+	init_xstate_buf = alloc_bootmem_align(xstate_size,
+					      __alignof__(struct xsave_struct));
 	init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
 
 	clts();

arch/x86/pci/i386.c (+5 -13)
@@ -65,21 +65,13 @@
 			resource_size_t size, resource_size_t align)
 {
 	struct pci_dev *dev = data;
-	resource_size_t start = round_down(res->end - size + 1, align);
+	resource_size_t start = res->start;
 
 	if (res->flags & IORESOURCE_IO) {
-
-		/*
-		 * If we're avoiding ISA aliases, the largest contiguous I/O
-		 * port space is 256 bytes. Clearing bits 9 and 10 preserves
-		 * all 256-byte and smaller alignments, so the result will
-		 * still be correctly aligned.
-		 */
-		if (!skip_isa_ioresource_align(dev))
-			start &= ~0x300;
-	} else if (res->flags & IORESOURCE_MEM) {
-		if (start < BIOS_END)
-			start = res->end;	/* fail; no space */
+		if (skip_isa_ioresource_align(dev))
+			return start;
+		if (start & 0x300)
+			start = (start + 0x3ff) & ~0x3ff;
 	}
 	return start;
 }
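The I/O branch now rounds upward past ISA aliases instead of masking bits off a top-down candidate: a port whose bits 8-9 are set (i.e. one that would land in the classic 0x100-0x3ff alias window) gets pushed to the next 1 KB boundary. Shown standalone:

    #include <stdio.h>

    static unsigned long align_io(unsigned long start)
    {
            if (start & 0x300)
                    start = (start + 0x3ff) & ~0x3ffUL;
            return start;
    }

    int main(void)
    {
            printf("%#lx -> %#lx\n", 0x1100UL, align_io(0x1100UL)); /* -> 0x1400 */
            printf("%#lx -> %#lx\n", 0x1400UL, align_io(0x1400UL)); /* unchanged */
            return 0;
    }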
arch/x86/vdso/Makefile (+2 -2)
@@ -25,7 +25,7 @@
 
 export CPPFLAGS_vdso.lds += -P -C
 
-VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \
+VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
			-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 
 $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
@@ -69,7 +69,7 @@
 vdso32-images			= $(vdso32.so-y:%=vdso32-%.so)
 
 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1
 
 # This makes sure the $(obj) subdirectory exists even though vdso32/
 # is not a kbuild sub-make subdirectory.

block/blk-map.c (+3 -2)
@@ -201,12 +201,13 @@
 	for (i = 0; i < iov_count; i++) {
 		unsigned long uaddr = (unsigned long)iov[i].iov_base;
 
+		if (!iov[i].iov_len)
+			return -EINVAL;
+
 		if (uaddr & queue_dma_alignment(q)) {
 			unaligned = 1;
 			break;
 		}
-		if (!iov[i].iov_len)
-			return -EINVAL;
 	}
 
 	if (unaligned || (q->dma_pad_mask & len) || map_data)

block/blk-merge.c (+3 -3)
@@ -21,7 +21,7 @@
 		return 0;
 
 	fbio = bio;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
@@ -87,7 +87,7 @@
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
 {
-	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (!blk_queue_cluster(q))
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
@@ -123,7 +123,7 @@
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 
 	/*
 	 * for each bio in rq

block/blk-settings.c (+22 -29)
@@ -126,7 +126,7 @@
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
-	lim->no_cluster = 0;
+	lim->cluster = 1;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -229,8 +229,8 @@
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q:  the request queue for the device
+ * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
+ * @limits: the queue limits
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
@@ -244,7 +244,7 @@
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
 {
 	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -252,9 +252,23 @@
 		       __func__, max_hw_sectors);
 	}
 
-	q->limits.max_hw_sectors = max_hw_sectors;
-	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
-				      BLK_DEF_MAX_SECTORS);
+	limits->max_hw_sectors = max_hw_sectors;
+	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
+				    BLK_DEF_MAX_SECTORS);
+}
+EXPORT_SYMBOL(blk_limits_max_hw_sectors);
+
+/**
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q:  the request queue for the device
+ * @max_hw_sectors:  max hardware sectors in the usual 512b unit
+ *
+ * Description:
+ *    See description for blk_limits_max_hw_sectors().
+ **/
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+{
+	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
@@ -478,15 +464,6 @@
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	blk_stack_limits(&t->limits, &b->limits, 0);
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(t->queue_lock, flags);
-		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
@@ -550,7 +545,7 @@
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm(t->io_opt, b->io_opt);
 
-	t->no_cluster |= b->no_cluster;
+	t->cluster &= b->cluster;
 	t->discard_zeroes_data &= b->discard_zeroes_data;
 
 	/* Physical block size a multiple of the logical block size? */
@@ -646,7 +641,6 @@
 		       sector_t offset)
 {
 	struct request_queue *t = disk->queue;
-	struct request_queue *b = bdev_get_queue(bdev);
 
 	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
@@ -655,17 +651,6 @@
 
 		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
 		       top, bottom);
-	}
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(t->queue_lock, flags);
-		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
 	}
 }
 EXPORT_SYMBOL(disk_stack_limits);

block/blk-sysfs.c (+1 -1)
@@ -119,7 +119,7 @@
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (blk_queue_cluster(q))
 		return queue_var_show(queue_max_segment_size(q), (page));
 
 	return queue_var_show(PAGE_CACHE_SIZE, (page));
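All the call sites above rely on the cluster flag having moved from queue_flags into the stacked queue_limits. A sketch of the accessor they now use, presumably defined in include/linux/blkdev.h in this series:

    static inline unsigned int blk_queue_cluster(struct request_queue *q)
    {
            return q->limits.cluster;	/* stacks via t->cluster &= b->cluster */
    }

Because it lives in the limits, the flag now propagates through blk_stack_limits() like every other limit, which is what made the locked queue_flag_clear() dance above removable.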
+25
-14
block/blk-throttle.c
+25
-14
block/blk-throttle.c
···
355
355
tg->slice_end[rw], jiffies);
356
356
}
357
357
358
+
static inline void throtl_set_slice_end(struct throtl_data *td,
359
+
struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
360
+
{
361
+
tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
362
+
}
363
+
358
364
static inline void throtl_extend_slice(struct throtl_data *td,
359
365
struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
360
366
{
···
396
390
*/
397
391
if (throtl_slice_used(td, tg, rw))
398
392
return;
393
+
394
+
/*
395
+
* A bio has been dispatched. Also adjust slice_end. It might happen
396
+
* that initially cgroup limit was very low resulting in high
397
+
* slice_end, but later limit was bumped up and bio was dispached
398
+
* sooner, then we need to reduce slice_end. A high bogus slice_end
399
+
* is bad because it does not allow new slice to start.
400
+
*/
401
+
402
+
throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
399
403
400
404
time_elapsed = jiffies - tg->slice_start[rw];
401
405
···
725
709
struct throtl_grp *tg;
726
710
struct hlist_node *pos, *n;
727
711
728
-
/*
729
-
* Make sure atomic_inc() effects from
730
-
* throtl_update_blkio_group_read_bps(), group of functions are
731
-
* visible.
732
-
* Is this required or smp_mb__after_atomic_inc() was suffcient
733
-
* after the atomic_inc().
734
-
*/
735
-
smp_rmb();
736
712
if (!atomic_read(&td->limits_changed))
737
713
return;
738
714
739
715
throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));
740
716
741
-
hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
742
-
/*
743
-
* Do I need an smp_rmb() here to make sure tg->limits_changed
744
-
* update is visible. I am relying on smp_rmb() at the
745
-
* beginning of function and not putting a new one here.
746
-
*/
717
+
/*
718
+
* Make sure updates from throtl_update_blkio_group_read_bps() group
719
+
* of functions to tg->limits_changed are visible. We do not
720
+
* want update td->limits_changed to be visible but update to
721
+
* tg->limits_changed not being visible yet on this cpu. Hence
722
+
* the read barrier.
723
+
*/
724
+
smp_rmb();
747
725
726
+
hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
748
727
if (throtl_tg_on_rr(tg) && tg->limits_changed) {
749
728
throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
750
729
" riops=%u wiops=%u", tg->bps[READ],
+3
drivers/acpi/acpica/evgpeinit.c
+3
drivers/acpi/acpica/evgpeinit.c
···
408
408
return_ACPI_STATUS(AE_OK);
409
409
}
410
410
411
+
/* Disable the GPE in case it's been enabled already. */
412
+
(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
413
+
411
414
/*
412
415
* Add the GPE information from above to the gpe_event_info block for
413
416
* use during dispatch of this GPE.
-5
drivers/acpi/battery.c
-5
drivers/acpi/battery.c
···
130
130
unsigned long flags;
131
131
};
132
132
133
-
static int acpi_battery_update(struct acpi_battery *battery);
134
-
135
133
#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat);
136
134
137
135
inline int acpi_battery_present(struct acpi_battery *battery)
···
183
185
{
184
186
int ret = 0;
185
187
struct acpi_battery *battery = to_acpi_battery(psy);
186
-
187
-
if (acpi_battery_update(battery))
188
-
return -ENODEV;
189
188
190
189
if (acpi_battery_present(battery)) {
191
190
/* run battery update only if it is present */
+60
-37
drivers/acpi/scan.c
+60
-37
drivers/acpi/scan.c
···
705
705
}
706
706
707
707
static acpi_status
708
-
acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device,
709
-
union acpi_object *package)
708
+
acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
709
+
struct acpi_device_wakeup *wakeup)
710
710
{
711
-
int i = 0;
711
+
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
712
+
union acpi_object *package = NULL;
712
713
union acpi_object *element = NULL;
714
+
acpi_status status;
715
+
int i = 0;
713
716
714
-
if (!device || !package || (package->package.count < 2))
717
+
if (!wakeup)
715
718
return AE_BAD_PARAMETER;
719
+
720
+
/* _PRW */
721
+
status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer);
722
+
if (ACPI_FAILURE(status)) {
723
+
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
724
+
return status;
725
+
}
726
+
727
+
package = (union acpi_object *)buffer.pointer;
728
+
729
+
if (!package || (package->package.count < 2)) {
730
+
status = AE_BAD_DATA;
731
+
goto out;
732
+
}
716
733
717
734
element = &(package->package.elements[0]);
718
-
if (!element)
719
-
return AE_BAD_PARAMETER;
735
+
if (!element) {
736
+
status = AE_BAD_DATA;
737
+
goto out;
738
+
}
720
739
if (element->type == ACPI_TYPE_PACKAGE) {
721
740
if ((element->package.count < 2) ||
722
741
(element->package.elements[0].type !=
723
742
ACPI_TYPE_LOCAL_REFERENCE)
724
-
|| (element->package.elements[1].type != ACPI_TYPE_INTEGER))
725
-
return AE_BAD_DATA;
726
-
device->wakeup.gpe_device =
743
+
|| (element->package.elements[1].type != ACPI_TYPE_INTEGER)) {
744
+
status = AE_BAD_DATA;
745
+
goto out;
746
+
}
747
+
wakeup->gpe_device =
727
748
element->package.elements[0].reference.handle;
728
-
device->wakeup.gpe_number =
749
+
wakeup->gpe_number =
729
750
(u32) element->package.elements[1].integer.value;
730
751
} else if (element->type == ACPI_TYPE_INTEGER) {
731
-
device->wakeup.gpe_number = element->integer.value;
732
-
} else
733
-
return AE_BAD_DATA;
752
+
wakeup->gpe_device = NULL;
753
+
wakeup->gpe_number = element->integer.value;
754
+
} else {
755
+
status = AE_BAD_DATA;
756
+
goto out;
757
+
}
734
758
735
759
element = &(package->package.elements[1]);
736
760
if (element->type != ACPI_TYPE_INTEGER) {
737
-
return AE_BAD_DATA;
761
+
status = AE_BAD_DATA;
762
+
goto out;
738
763
}
739
-
device->wakeup.sleep_state = element->integer.value;
764
+
wakeup->sleep_state = element->integer.value;
740
765
741
766
if ((package->package.count - 2) > ACPI_MAX_HANDLES) {
742
-
return AE_NO_MEMORY;
767
+
status = AE_NO_MEMORY;
768
+
goto out;
743
769
}
744
-
device->wakeup.resources.count = package->package.count - 2;
745
-
for (i = 0; i < device->wakeup.resources.count; i++) {
770
+
wakeup->resources.count = package->package.count - 2;
771
+
for (i = 0; i < wakeup->resources.count; i++) {
746
772
element = &(package->package.elements[i + 2]);
747
-
if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
748
-
return AE_BAD_DATA;
773
+
if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
774
+
status = AE_BAD_DATA;
775
+
goto out;
776
+
}
749
777
750
-
device->wakeup.resources.handles[i] = element->reference.handle;
778
+
wakeup->resources.handles[i] = element->reference.handle;
751
779
}
752
780
753
-
acpi_gpe_can_wake(device->wakeup.gpe_device, device->wakeup.gpe_number);
781
+
acpi_gpe_can_wake(wakeup->gpe_device, wakeup->gpe_number);
754
782
755
-
return AE_OK;
783
+
out:
784
+
kfree(buffer.pointer);
785
+
786
+
return status;
756
787
}
757
788
758
789
static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
···
818
787
static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
819
788
{
820
789
acpi_status status = 0;
821
-
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
822
-
union acpi_object *package = NULL;
823
790
int psw_error;
824
791
825
-
/* _PRW */
826
-
status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer);
827
-
if (ACPI_FAILURE(status)) {
828
-
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
829
-
goto end;
830
-
}
831
-
832
-
package = (union acpi_object *)buffer.pointer;
833
-
status = acpi_bus_extract_wakeup_device_power_package(device, package);
792
+
status = acpi_bus_extract_wakeup_device_power_package(device->handle,
793
+
&device->wakeup);
834
794
if (ACPI_FAILURE(status)) {
835
795
ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package"));
836
796
goto end;
837
797
}
838
-
839
-
kfree(buffer.pointer);
840
798
841
799
device->wakeup.flags.valid = 1;
842
800
device->wakeup.prepare_count = 0;
···
1371
1351
struct acpi_bus_ops *ops = context;
1372
1352
int type;
1373
1353
unsigned long long sta;
1354
+
struct acpi_device_wakeup wakeup;
1374
1355
struct acpi_device *device;
1375
1356
acpi_status status;
1376
1357
int result;
···
1381
1360
return AE_OK;
1382
1361
1383
1362
if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
1384
-
!(sta & ACPI_STA_DEVICE_FUNCTIONING))
1363
+
!(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
1364
+
acpi_bus_extract_wakeup_device_power_package(handle, &wakeup);
1385
1365
return AE_CTRL_DEPTH;
1366
+
}
1386
1367
1387
1368
/*
1388
1369
* We may already have an acpi_device from a previous enumeration. If
+11
-11
drivers/ata/Kconfig
+11
-11
drivers/ata/Kconfig
···
128
128
129
129
If unsure, say N.
130
130
131
-
config PATA_MPC52xx
132
-
tristate "Freescale MPC52xx SoC internal IDE"
133
-
depends on PPC_MPC52xx && PPC_BESTCOMM
134
-
select PPC_BESTCOMM_ATA
135
-
help
136
-
This option enables support for integrated IDE controller
137
-
of the Freescale MPC52xx SoC.
138
-
139
-
If unsure, say N.
140
-
141
131
config PATA_OCTEON_CF
142
132
tristate "OCTEON Boot Bus Compact Flash support"
143
133
depends on CPU_CAVIUM_OCTEON
···
356
366
357
367
config PATA_CS5536
358
368
tristate "CS5536 PATA support"
359
-
depends on PCI && X86 && !X86_64
369
+
depends on PCI
360
370
help
361
371
This option enables support for the AMD CS5536
362
372
companion chip used with the Geode LX processor family.
···
478
488
controllers. If you wish to use only the SATA ports then select
479
489
the AHCI driver alone. If you wish to the use the PATA port or
480
490
both SATA and PATA include this driver.
491
+
492
+
If unsure, say N.
493
+
494
+
config PATA_MPC52xx
495
+
tristate "Freescale MPC52xx SoC internal IDE"
496
+
depends on PPC_MPC52xx && PPC_BESTCOMM
497
+
select PPC_BESTCOMM_ATA
498
+
help
499
+
This option enables support for integrated IDE controller
500
+
of the Freescale MPC52xx SoC.
481
501
482
502
If unsure, say N.
483
503
+1
-1
drivers/ata/Makefile
+1
-1
drivers/ata/Makefile
···
11
11
12
12
# SFF w/ custom DMA
13
13
obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
14
-
obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
15
14
obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
16
15
obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
17
16
obj-$(CONFIG_SATA_SX4) += sata_sx4.o
···
51
52
obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
52
53
obj-$(CONFIG_PATA_MACIO) += pata_macio.o
53
54
obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
55
+
obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
54
56
obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
55
57
obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o
56
58
obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o
+15
-9
drivers/ata/libata-core.c
+15
-9
drivers/ata/libata-core.c
···
4807
4807
{
4808
4808
struct ata_device *dev = qc->dev;
4809
4809
4810
-
if (ata_tag_internal(qc->tag))
4811
-
return;
4812
-
4813
4810
if (ata_is_nodata(qc->tf.protocol))
4814
4811
return;
4815
4812
···
4855
4858
if (unlikely(qc->err_mask))
4856
4859
qc->flags |= ATA_QCFLAG_FAILED;
4857
4860
4858
-
if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4859
-
/* always fill result TF for failed qc */
4861
+
/*
4862
+
* Finish internal commands without any further processing
4863
+
* and always with the result TF filled.
4864
+
*/
4865
+
if (unlikely(ata_tag_internal(qc->tag))) {
4860
4866
fill_result_tf(qc);
4867
+
__ata_qc_complete(qc);
4868
+
return;
4869
+
}
4861
4870
4862
-
if (!ata_tag_internal(qc->tag))
4863
-
ata_qc_schedule_eh(qc);
4864
-
else
4865
-
__ata_qc_complete(qc);
4871
+
/*
4872
+
* Non-internal qc has failed. Fill the result TF and
4873
+
* summon EH.
4874
+
*/
4875
+
if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4876
+
fill_result_tf(qc);
4877
+
ata_qc_schedule_eh(qc);
4866
4878
return;
4867
4879
}
4868
4880
+14
-3
drivers/ata/libata-eh.c
+14
-3
drivers/ata/libata-eh.c
···
3275
3275
struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
3276
3276
struct ata_eh_context *ehc = &link->eh_context;
3277
3277
struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3278
+
enum ata_lpm_policy old_policy = link->lpm_policy;
3278
3279
unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3279
3280
unsigned int err_mask;
3280
3281
int rc;
···
3339
3338
goto fail;
3340
3339
}
3341
3340
3341
+
/*
3342
+
* Low level driver acked the transition. Issue DIPM command
3343
+
* with the new policy set.
3344
+
*/
3345
+
link->lpm_policy = policy;
3346
+
if (ap && ap->slave_link)
3347
+
ap->slave_link->lpm_policy = policy;
3348
+
3342
3349
/* host config updated, enable DIPM if transitioning to MIN_POWER */
3343
3350
ata_for_each_dev(dev, link, ENABLED) {
3344
3351
if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) {
···
3362
3353
}
3363
3354
}
3364
3355
3365
-
link->lpm_policy = policy;
3366
-
if (ap && ap->slave_link)
3367
-
ap->slave_link->lpm_policy = policy;
3368
3356
return 0;
3369
3357
3370
3358
fail:
3359
+
/* restore the old policy */
3360
+
link->lpm_policy = old_policy;
3361
+
if (ap && ap->slave_link)
3362
+
ap->slave_link->lpm_policy = old_policy;
3363
+
3371
3364
/* if no device or only one more chance is left, disable LPM */
3372
3365
if (!dev || ehc->tries[dev->devno] <= 2) {
3373
3366
ata_link_printk(link, KERN_WARNING,
+3
-4
drivers/ata/libata-sff.c
+3
-4
drivers/ata/libata-sff.c
···
1532
1532
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1533
1533
return ata_sff_idle_irq(ap);
1534
1534
break;
1535
-
case HSM_ST:
1536
-
case HSM_ST_LAST:
1537
-
break;
1538
-
default:
1535
+
case HSM_ST_IDLE:
1539
1536
return ata_sff_idle_irq(ap);
1537
+
default:
1538
+
break;
1540
1539
}
1541
1540
1542
1541
/* check main status, clearing INTRQ if needed */
+14
-6
drivers/ata/pata_cs5536.c
+14
-6
drivers/ata/pata_cs5536.c
···
37
37
#include <linux/delay.h>
38
38
#include <linux/libata.h>
39
39
#include <scsi/scsi_host.h>
40
+
41
+
#ifdef CONFIG_X86_32
40
42
#include <asm/msr.h>
43
+
static int use_msr;
44
+
module_param_named(msr, use_msr, int, 0644);
45
+
MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
46
+
#else
47
+
#undef rdmsr /* avoid accidental MSR usage on, e.g. x86-64 */
48
+
#undef wrmsr
49
+
#define rdmsr(x, y, z) do { } while (0)
50
+
#define wrmsr(x, y, z) do { } while (0)
51
+
#define use_msr 0
52
+
#endif
41
53
42
54
#define DRV_NAME "pata_cs5536"
43
-
#define DRV_VERSION "0.0.7"
55
+
#define DRV_VERSION "0.0.8"
44
56
45
57
enum {
46
58
CFG = 0,
···
87
75
IDE_ETC_NODMA = 0x03,
88
76
};
89
77
90
-
static int use_msr;
91
-
92
78
static const u32 msr_reg[4] = {
93
79
MSR_IDE_CFG, MSR_IDE_DTC, MSR_IDE_CAST, MSR_IDE_ETC,
94
80
};
···
98
88
static inline int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
99
89
{
100
90
if (unlikely(use_msr)) {
101
-
u32 dummy;
91
+
u32 dummy __maybe_unused;
102
92
103
93
rdmsr(msr_reg[reg], *val, dummy);
104
94
return 0;
···
304
294
MODULE_LICENSE("GPL");
305
295
MODULE_DEVICE_TABLE(pci, cs5536);
306
296
MODULE_VERSION(DRV_VERSION);
307
-
module_param_named(msr, use_msr, int, 0644);
308
-
MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
309
297
310
298
module_init(cs5536_init);
311
299
module_exit(cs5536_exit);
+2
drivers/block/cciss.c
+2
drivers/block/cciss.c
+8
-6
drivers/block/drbd/drbd_receiver.c
+8
-6
drivers/block/drbd/drbd_receiver.c
···
3627
3627
}
3628
3628
3629
3629
shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
3630
-
rv = drbd_recv(mdev, &header->h80.payload, shs);
3631
-
if (unlikely(rv != shs)) {
3632
-
dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
3633
-
goto err_out;
3634
-
}
3635
-
3636
3630
if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3637
3631
dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3638
3632
goto err_out;
3633
+
}
3634
+
3635
+
if (shs) {
3636
+
rv = drbd_recv(mdev, &header->h80.payload, shs);
3637
+
if (unlikely(rv != shs)) {
3638
+
dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
3639
+
goto err_out;
3640
+
}
3639
3641
}
3640
3642
3641
3643
rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
+2
-1
drivers/block/drbd/drbd_req.h
+2
-1
drivers/block/drbd/drbd_req.h
···
339
339
}
340
340
341
341
/* completion of master bio is outside of spinlock.
342
-
* If you need it irqsave, do it your self! */
342
+
* If you need it irqsave, do it your self!
343
+
* Which means: don't use from bio endio callback. */
343
344
static inline int req_mod(struct drbd_request *req,
344
345
enum drbd_req_event what)
345
346
{
+9
-1
drivers/block/drbd/drbd_worker.c
+9
-1
drivers/block/drbd/drbd_worker.c
···
193
193
*/
194
194
void drbd_endio_pri(struct bio *bio, int error)
195
195
{
196
+
unsigned long flags;
196
197
struct drbd_request *req = bio->bi_private;
197
198
struct drbd_conf *mdev = req->mdev;
199
+
struct bio_and_error m;
198
200
enum drbd_req_event what;
199
201
int uptodate = bio_flagged(bio, BIO_UPTODATE);
200
202
···
222
220
bio_put(req->private_bio);
223
221
req->private_bio = ERR_PTR(error);
224
222
225
-
req_mod(req, what);
223
+
/* not req_mod(), we need irqsave here! */
224
+
spin_lock_irqsave(&mdev->req_lock, flags);
225
+
__req_mod(req, what, &m);
226
+
spin_unlock_irqrestore(&mdev->req_lock, flags);
227
+
228
+
if (m.bio)
229
+
complete_master_bio(mdev, &m);
226
230
}
227
231
228
232
int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+4
-2
drivers/bluetooth/hci_ldisc.c
+4
-2
drivers/bluetooth/hci_ldisc.c
+9
-2
drivers/char/agp/intel-gtt.c
+9
-2
drivers/char/agp/intel-gtt.c
···
1192
1192
writel(1, intel_private.i9xx_flush_page);
1193
1193
}
1194
1194
1195
-
static void i965_write_entry(dma_addr_t addr, unsigned int entry,
1195
+
static void i965_write_entry(dma_addr_t addr,
1196
+
unsigned int entry,
1196
1197
unsigned int flags)
1197
1198
{
1199
+
u32 pte_flags;
1200
+
1201
+
pte_flags = I810_PTE_VALID;
1202
+
if (flags == AGP_USER_CACHED_MEMORY)
1203
+
pte_flags |= I830_PTE_SYSTEM_CACHED;
1204
+
1198
1205
/* Shift high bits down */
1199
1206
addr |= (addr >> 28) & 0xf0;
1200
-
writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
1207
+
writel(addr | pte_flags, intel_private.gtt + entry);
1201
1208
}
1202
1209
1203
1210
static bool gen6_check_flags(unsigned int flags)
+7
-5
drivers/char/ramoops.c
+7
-5
drivers/char/ramoops.c
···
29
29
#include <linux/ramoops.h>
30
30
31
31
#define RAMOOPS_KERNMSG_HDR "===="
32
-
#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval))
33
32
34
33
#define RECORD_SIZE 4096
35
34
···
64
65
struct ramoops_context, dump);
65
66
unsigned long s1_start, s2_start;
66
67
unsigned long l1_cpy, l2_cpy;
67
-
int res;
68
-
char *buf;
68
+
int res, hdr_size;
69
+
char *buf, *buf_orig;
69
70
struct timeval timestamp;
70
71
71
72
/* Only dump oopses if dump_oops is set */
···
73
74
return;
74
75
75
76
buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
77
+
buf_orig = buf;
78
+
76
79
memset(buf, '\0', RECORD_SIZE);
77
80
res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
78
81
buf += res;
···
82
81
res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec);
83
82
buf += res;
84
83
85
-
l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE));
86
-
l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy);
84
+
hdr_size = buf - buf_orig;
85
+
l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - hdr_size));
86
+
l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - hdr_size) - l2_cpy);
87
87
88
88
s2_start = l2 - l2_cpy;
89
89
s1_start = l1 - l1_cpy;
+12
-7
drivers/clocksource/sh_cmt.c
+12
-7
drivers/clocksource/sh_cmt.c
···
 	} while (delay);
 }

+static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
+{
+	if (delta > p->max_match_value)
+		dev_warn(&p->pdev->dev, "delta out of range\n");
+
+	p->next_match_value = delta;
+	sh_cmt_clock_event_program_verify(p, 0);
+}
+
 static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
 {
 	unsigned long flags;

-	if (delta > p->max_match_value)
-		dev_warn(&p->pdev->dev, "delta out of range\n");
-
 	spin_lock_irqsave(&p->lock, flags);
-	p->next_match_value = delta;
-	sh_cmt_clock_event_program_verify(p, 0);
+	__sh_cmt_set_next(p, delta);
 	spin_unlock_irqrestore(&p->lock, flags);
 }

···
 	/* setup timeout if no clockevent */
 	if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
-		sh_cmt_set_next(p, p->max_match_value);
+		__sh_cmt_set_next(p, p->max_match_value);
 out:
 	spin_unlock_irqrestore(&p->lock, flags);

···
 	/* adjust the timeout to maximum if only clocksource left */
 	if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
-		sh_cmt_set_next(p, p->max_match_value);
+		__sh_cmt_set_next(p, p->max_match_value);

 	spin_unlock_irqrestore(&p->lock, flags);
 }
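
The sh_cmt change is the usual lock-split refactor: the body moves into __sh_cmt_set_next(), which expects p->lock to be held, so the start/stop paths that already hold the lock can call it without self-deadlocking, while sh_cmt_set_next() remains the locking wrapper for everyone else. A generic sketch of the pattern, with hypothetical names:

	/* caller must hold f->lock */
	static void __foo_set(struct foo *f, unsigned long v)
	{
		f->val = v;
	}

	static void foo_set(struct foo *f, unsigned long v)
	{
		unsigned long flags;

		spin_lock_irqsave(&f->lock, flags);
		__foo_set(f, v);
		spin_unlock_irqrestore(&f->lock, flags);
	}

The double-underscore prefix is the kernel's convention for "caller already holds the lock".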
+15 -4	drivers/gpio/cs5535-gpio.c
···
  * registers, see include/linux/cs5535.h.
  */

-static void errata_outl(u32 val, unsigned long addr)
+static void errata_outl(struct cs5535_gpio_chip *chip, u32 val,
+		unsigned int reg)
 {
+	unsigned long addr = chip->base + 0x80 + reg;
+
 	/*
 	 * According to the CS5536 errata (#36), after suspend
 	 * a write to the high bank GPIO register will clear all
 	 * non-selected bits; the recommended workaround is a
 	 * read-modify-write operation.
+	 *
+	 * Don't apply this errata to the edge status GPIOs, as writing
+	 * to their lower bits will clear them.
 	 */
-	val |= inl(addr);
+	if (reg != GPIO_POSITIVE_EDGE_STS && reg != GPIO_NEGATIVE_EDGE_STS) {
+		if (val & 0xffff)
+			val |= (inl(addr) & 0xffff); /* ignore the high bits */
+		else
+			val |= (inl(addr) ^ (val >> 16));
+	}
 	outl(val, addr);
 }

···
 		outl(1 << offset, chip->base + reg);
 	else
 		/* high bank register */
-		errata_outl(1 << (offset - 16), chip->base + 0x80 + reg);
+		errata_outl(chip, 1 << (offset - 16), reg);
 }

 void cs5535_gpio_set(unsigned offset, unsigned int reg)
···
 		outl(1 << (offset + 16), chip->base + reg);
 	else
 		/* high bank register */
-		errata_outl(1 << offset, chip->base + 0x80 + reg);
+		errata_outl(chip, 1 << offset, reg);
 }

 void cs5535_gpio_clear(unsigned offset, unsigned int reg)
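
The refinement matters because, per the new comment, writing back the edge-status bits clears them: they behave as write-1-to-clear, so a blind read-modify-write would discard pending edge events. The general shape of an errata workaround that has to respect such bits might look like this (a hypothetical helper, not the driver's code):

	/* RMW is only safe for plain read/write bits */
	static void rmw_outl(u32 val, unsigned long addr, u32 w1c_mask)
	{
		u32 cur = inl(addr);

		/* never feed write-1-to-clear bits back into the write */
		outl(val | (cur & ~w1c_mask), addr);
	}

The driver's version is more involved because the high bank packs paired set/clear halves into one 32-bit word, hence the extra masking and the val >> 16 term.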
+3	drivers/gpio/gpiolib.c
+1 -1	drivers/gpio/rdc321x-gpio.c
+5 -2	drivers/gpu/drm/drm_crtc_helper.c
···
 	}

 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc && !drm_helper_encoder_in_use(encoder)) {
+		if (!drm_helper_encoder_in_use(encoder)) {
 			drm_encoder_disable(encoder);
 			/* disconnector encoder from any connector */
 			encoder->crtc = NULL;
···
 			continue;

 		connector->status = connector->funcs->detect(connector, false);
-		DRM_DEBUG_KMS("connector status updated to %d\n", connector->status);
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+			      connector->base.id,
+			      drm_get_connector_name(connector),
+			      old_status, connector->status);
 		if (old_status != connector->status)
 			changed = true;
 	}
+1 -1	drivers/gpu/drm/i915/intel_bios.c
+30 -7	drivers/gpu/drm/i915/intel_dp.c
···
 	uint16_t address = algo_data->address;
 	uint8_t msg[5];
 	uint8_t reply[2];
+	unsigned retry;
 	int msg_bytes;
 	int reply_bytes;
 	int ret;
···
 		break;
 	}

-	for (;;) {
-		ret = intel_dp_aux_ch(intel_dp,
-				      msg, msg_bytes,
-				      reply, reply_bytes);
+	for (retry = 0; retry < 5; retry++) {
+		ret = intel_dp_aux_ch(intel_dp,
+				      msg, msg_bytes,
+				      reply, reply_bytes);
 		if (ret < 0) {
 			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
 			return ret;
 		}
+
+		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+		case AUX_NATIVE_REPLY_ACK:
+			/* I2C-over-AUX Reply field is only valid
+			 * when paired with AUX ACK.
+			 */
+			break;
+		case AUX_NATIVE_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_ch native nack\n");
+			return -EREMOTEIO;
+		case AUX_NATIVE_REPLY_DEFER:
+			udelay(100);
+			continue;
+		default:
+			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+				  reply[0]);
+			return -EREMOTEIO;
+		}
+
 		switch (reply[0] & AUX_I2C_REPLY_MASK) {
 		case AUX_I2C_REPLY_ACK:
 			if (mode == MODE_I2C_READ) {
···
 			}
 			return reply_bytes - 1;
 		case AUX_I2C_REPLY_NACK:
-			DRM_DEBUG_KMS("aux_ch nack\n");
+			DRM_DEBUG_KMS("aux_i2c nack\n");
 			return -EREMOTEIO;
 		case AUX_I2C_REPLY_DEFER:
-			DRM_DEBUG_KMS("aux_ch defer\n");
+			DRM_DEBUG_KMS("aux_i2c defer\n");
 			udelay(100);
 			break;
 		default:
-			DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
 			return -EREMOTEIO;
 		}
 	}
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EREMOTEIO;
 }

 static int
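
Replacing for (;;) with a counted loop turns a permanently-deferring sink from a hang into a bounded wait, and checking the native-reply field before the I2C-reply field mirrors the two-level acknowledgement of I2C-over-AUX. The control flow reduces to this shape (a sketch; do_transfer() and DEFER are stand-ins, not driver symbols):

	int i, ret;

	for (i = 0; i < 5; i++) {
		ret = do_transfer();
		if (ret < 0)
			return ret;	/* hard failure: stop immediately */
		if (ret == DEFER) {
			udelay(100);	/* transient: back off and retry */
			continue;
		}
		return ret;		/* ack or nack: we are done */
	}
	return -EREMOTEIO;		/* retry budget exhausted */

Every path either finishes or consumes one retry, so termination is guaranteed.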
+8 -11	drivers/gpu/drm/i915/intel_ringbuffer.c
···
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 head;

-	head = intel_read_status_page(ring, 4);
-	if (head) {
-		ring->head = head & HEAD_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
-		if (ring->space >= n)
-			return 0;
-	}
-
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
 	do {
-		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+		/* If the reported head position has wrapped or hasn't advanced,
+		 * fallback to the slow and accurate path.
+		 */
+		head = intel_read_status_page(ring, 4);
+		if (head < ring->actual_head)
+			head = I915_READ_HEAD(ring);
+		ring->actual_head = head;
+		ring->head = head & HEAD_ADDR;
 		ring->space = ring->head - (ring->tail + 8);
 		if (ring->space < 0)
 			ring->space += ring->size;
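
The free-space computation in this loop is the standard ring-buffer formula: with head and tail both byte offsets into a ring of ring->size bytes, space = head - (tail + 8), and when that goes negative because head has wrapped around, adding ring->size brings it back into range (the extra 8 bytes keep tail from ever catching head exactly). Worked example for a 4096-byte ring:

	head = 100, tail = 3000
	space = 100 - (3000 + 8) = -2908  ->  -2908 + 4096 = 1188 bytes free

A tiny self-contained version of the same calculation:

	static int ring_space(unsigned int head, unsigned int tail,
			      unsigned int size)
	{
		int space = head - (tail + 8);

		if (space < 0)
			space += size;
		return space;
	}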
+3 -2	drivers/gpu/drm/i915/intel_ringbuffer.h
+6 -3	drivers/gpu/drm/i915/intel_sdvo.c
···
 		speed = mapping->i2c_speed;
 	}

-	sdvo->i2c = &dev_priv->gmbus[pin].adapter;
-	intel_gmbus_set_speed(sdvo->i2c, speed);
-	intel_gmbus_force_bit(sdvo->i2c, true);
+	if (pin < GMBUS_NUM_PORTS) {
+		sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+		intel_gmbus_set_speed(sdvo->i2c, speed);
+		intel_gmbus_force_bit(sdvo->i2c, true);
+	} else
+		sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
 }

 static bool
+4 -3	drivers/gpu/drm/radeon/atombios_crtc.c
···
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
 		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
-		atombios_blank_crtc(crtc, ATOM_ENABLE);
+		if (radeon_crtc->enabled)
+			atombios_blank_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev))
 			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
 		atombios_enable_crtc(crtc, ATOM_DISABLE);
···
 			dp_clock = dig_connector->dp_clock;
 		}
 	}
-
+#if 0 /* doesn't work properly on some laptops */
 	/* use recommended ref_div for ss */
 	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 		if (ss_enabled) {
···
 			}
 		}
 	}
-
+#endif
 	if (ASIC_IS_AVIVO(rdev)) {
 		/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
 		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
+12 -15	drivers/gpu/drm/radeon/evergreen.c
···
 	unsigned i;
 	u32 tmp;

+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
 	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		/* read MC_STATUS */
···
 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
 {
 	struct evergreen_mc_save save;
-	u32 srbm_reset = 0;
 	u32 grbm_reset = 0;

 	dev_info(rdev->dev, "GPU softreset \n");
···
 	udelay(50);
 	WREG32(GRBM_SOFT_RESET, 0);
 	(void)RREG32(GRBM_SOFT_RESET);
-
-	/* reset all the system blocks */
-	srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
-
-	dev_info(rdev->dev, "  SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
-	WREG32(SRBM_SOFT_RESET, srbm_reset);
-	(void)RREG32(SRBM_SOFT_RESET);
-	udelay(50);
-	WREG32(SRBM_SOFT_RESET, 0);
-	(void)RREG32(SRBM_SOFT_RESET);
 	/* Wait a little for things to settle down */
 	udelay(50);
 	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
···
 		RREG32(GRBM_STATUS_SE1));
 	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
 		RREG32(SRBM_STATUS));
-	/* After reset we need to reinit the asic as GPU often endup in an
-	 * incoherent state.
-	 */
-	atom_asic_init(rdev->mode_info.atom_context);
 	evergreen_mc_resume(rdev, &save);
 	return 0;
 }
···
 {
 	int r;

+	/* reset the asic, the gfx blocks are often in a bad state
+	 * after the driver is unloaded or after a resume
+	 */
+	if (radeon_asic_reset(rdev))
+		dev_warn(rdev->dev, "GPU reset failed !\n");
 	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
 	 * posting will perform necessary task to bring back GPU into good
 	 * shape.
···
 	r = radeon_atombios_init(rdev);
 	if (r)
 		return r;
+	/* reset the asic, the gfx blocks are often in a bad state
+	 * after the driver is unloaded or after a resume
+	 */
+	if (radeon_asic_reset(rdev))
+		dev_warn(rdev->dev, "GPU reset failed !\n");
 	/* Post card if necessary */
 	if (!evergreen_card_posted(rdev)) {
 		if (!rdev->bios) {
+1	drivers/gpu/drm/radeon/evergreend.h
+8 -2	drivers/gpu/drm/radeon/r600.c
···
 	u32 srbm_status;
 	u32 grbm_status;
 	u32 grbm_status2;
+	struct r100_gpu_lockup *lockup;
 	int r;
+
+	if (rdev->family >= CHIP_RV770)
+		lockup = &rdev->config.rv770.lockup;
+	else
+		lockup = &rdev->config.r600.lockup;

 	srbm_status = RREG32(R_000E50_SRBM_STATUS);
 	grbm_status = RREG32(R_008010_GRBM_STATUS);
 	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
 	if (!G_008010_GUI_ACTIVE(grbm_status)) {
-		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+		r100_gpu_lockup_update(lockup, &rdev->cp);
 		return false;
 	}
 	/* force CP activities */
···
 		radeon_ring_unlock_commit(rdev);
 	}
 	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
-	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
 }

 int r600_asic_reset(struct radeon_device *rdev)
+4 -5	drivers/gpu/drm/radeon/r600_cs.c
···
 	if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
 		/* the initial DDX does bad things with the CB size occasionally */
 		/* it rounds up height too far for slice tile max but the BO is smaller */
-		tmp = (height - 7) * 8 * bpe;
-		if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
-			dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
-			return -EINVAL;
-		}
+		/* r600c,g also seem to flush at bad times in some apps resulting in
+		 * bogus values here. So for linear just allow anything to avoid breaking
+		 * broken userspace.
+		 */
 	} else {
 		dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
 		return -EINVAL;
+4 -5	drivers/gpu/drm/radeon/radeon_device.c
···
 	radeon_pm_resume(rdev);
 	radeon_restore_bios_scratch_regs(rdev);

-	/* turn on display hw */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-	}
-
 	radeon_fbdev_set_suspend(rdev, 0);
 	release_console_sem();
···
 	radeon_hpd_init(rdev);
 	/* blat the mode back in */
 	drm_helper_resume_force_mode(dev);
+	/* turn on display hw */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+	}
 	return 0;
 }

+19	drivers/gpu/drm/radeon/radeon_drv.c
···
 static struct drm_driver kms_driver;

+static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
+	kfree(ap);
+}
+
 static int __devinit
 radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	/* Get rid of things like offb */
+	radeon_kick_out_firmware_fb(pdev);
+
 	return drm_get_pci_dev(pdev, ent, &kms_driver);
 }

+1 -1	drivers/gpu/drm/radeon/radeon_fb.c
···
 		goto out_unref;
 	}
 	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
-	info->apertures->ranges[0].size = rdev->mc.real_vram_size;
+	info->apertures->ranges[0].size = rdev->mc.aper_size;

 	info->fix.mmio_start = 0;
 	info->fix.mmio_len = 0;
+1 -1	drivers/leds/led-class.c
+2 -8	drivers/md/dm-table.c
···
 	 */

 	if (q->merge_bvec_fn && !ti->type->merge)
-		limits->max_sectors =
-			min_not_zero(limits->max_sectors,
-				     (unsigned int) (PAGE_SIZE >> 9));
+		blk_limits_max_hw_sectors(limits,
+					  (unsigned int) (PAGE_SIZE >> 9));
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
···
 	 * Copy table's limits to the DM device's request_queue
 	 */
 	q->limits = *limits;
-
-	if (limits->no_cluster)
-		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
-	else
-		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);

 	if (!dm_table_supports_discards(t))
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
-3	drivers/md/md.c
···
 		goto abort;
 	mddev->queue->queuedata = mddev;

-	/* Can be unlocked because the queue is new: no concurrency */
-	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
-
 	blk_queue_make_request(mddev->queue, md_make_request);

 	disk = alloc_disk(1 << shift);
+11 -10	drivers/media/IR/keymaps/rc-rc6-mce.c
···
 	{ 0x800f040a, KEY_DELETE },
 	{ 0x800f040b, KEY_ENTER },
-	{ 0x800f040c, KEY_POWER },
-	{ 0x800f040d, KEY_PROG1 }, /* Windows MCE button */
+	{ 0x800f040c, KEY_POWER },		/* PC Power */
+	{ 0x800f040d, KEY_PROG1 },		/* Windows MCE button */
 	{ 0x800f040e, KEY_MUTE },
 	{ 0x800f040f, KEY_INFO },

···
 	{ 0x800f0422, KEY_OK },
 	{ 0x800f0423, KEY_EXIT },
 	{ 0x800f0424, KEY_DVD },
-	{ 0x800f0425, KEY_TUNER }, /* LiveTV */
-	{ 0x800f0426, KEY_EPG }, /* Guide */
-	{ 0x800f0427, KEY_ZOOM }, /* Aspect */
+	{ 0x800f0425, KEY_TUNER },		/* LiveTV */
+	{ 0x800f0426, KEY_EPG },		/* Guide */
+	{ 0x800f0427, KEY_ZOOM },		/* Aspect */

 	{ 0x800f043a, KEY_BRIGHTNESSUP },

 	{ 0x800f0446, KEY_TV },
-	{ 0x800f0447, KEY_AUDIO }, /* My Music */
-	{ 0x800f0448, KEY_PVR }, /* RecordedTV */
+	{ 0x800f0447, KEY_AUDIO },		/* My Music */
+	{ 0x800f0448, KEY_PVR },		/* RecordedTV */
 	{ 0x800f0449, KEY_CAMERA },
 	{ 0x800f044a, KEY_VIDEO },
 	{ 0x800f044c, KEY_LANGUAGE },
 	{ 0x800f044d, KEY_TITLE },
-	{ 0x800f044e, KEY_PRINT }, /* Print - HP OEM version of remote */
+	{ 0x800f044e, KEY_PRINT },	/* Print - HP OEM version of remote */

 	{ 0x800f0450, KEY_RADIO },

-	{ 0x800f045a, KEY_SUBTITLE }, /* Caption/Teletext */
+	{ 0x800f045a, KEY_SUBTITLE },		/* Caption/Teletext */
 	{ 0x800f045b, KEY_RED },
 	{ 0x800f045c, KEY_GREEN },
 	{ 0x800f045d, KEY_YELLOW },
 	{ 0x800f045e, KEY_BLUE },

+	{ 0x800f0465, KEY_POWER2 },	/* TV Power */
 	{ 0x800f046e, KEY_PLAYPAUSE },
-	{ 0x800f046f, KEY_MEDIA }, /* Start media application (NEW) */
+	{ 0x800f046f, KEY_MEDIA },	/* Start media application (NEW) */

 	{ 0x800f0480, KEY_BRIGHTNESSDOWN },
 	{ 0x800f0481, KEY_PLAYPAUSE },
+16 -13	drivers/media/IR/lirc_dev.c
···

 	dev_dbg(ir->d.dev, LOGHEAD "poll called\n", ir->d.name, ir->d.minor);

-	if (!ir->attached) {
-		mutex_unlock(&ir->irctl_lock);
+	if (!ir->attached)
 		return POLLERR;
-	}

 	poll_wait(file, &ir->buf->wait_poll, wait);

···
 	if (!buf)
 		return -ENOMEM;

-	if (mutex_lock_interruptible(&ir->irctl_lock))
-		return -ERESTARTSYS;
+	if (mutex_lock_interruptible(&ir->irctl_lock)) {
+		ret = -ERESTARTSYS;
+		goto out_unlocked;
+	}
 	if (!ir->attached) {
-		mutex_unlock(&ir->irctl_lock);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto out_locked;
 	}

 	if (length % ir->chunk_size) {
-		dev_dbg(ir->d.dev, LOGHEAD "read result = -EINVAL\n",
-			ir->d.name, ir->d.minor);
-		mutex_unlock(&ir->irctl_lock);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_locked;
 	}

 	/*
···
 			lirc_buffer_read(ir->buf, buf);
 			ret = copy_to_user((void *)buffer+written, buf,
 					   ir->buf->chunk_size);
-			written += ir->buf->chunk_size;
+			if (!ret)
+				written += ir->buf->chunk_size;
+			else
+				ret = -EFAULT;
 		}
 	}

 	remove_wait_queue(&ir->buf->wait_poll, &wait);
 	set_current_state(TASK_RUNNING);
+
+out_locked:
 	mutex_unlock(&ir->irctl_lock);

 out_unlocked:
 	kfree(buf);
 	dev_dbg(ir->d.dev, LOGHEAD "read result = %s (%d)\n",
-		ir->d.name, ir->d.minor, ret ? "-EFAULT" : "OK", ret);
+		ir->d.name, ir->d.minor, ret ? "<fail>" : "<ok>", ret);

 	return ret ? ret : written;
 }
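
The lirc_dev read() rework replaces the scattered "unlock and return" exits with a label pair, the kernel's standard goto-unwind idiom: set the error code, jump to the label that undoes exactly as much state as has been set up so far, and fall through the cleanup in reverse order of acquisition. Schematically (a sketch with generic names, not the driver's exact code):

	ssize_t ret = 0;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (mutex_lock_interruptible(&lock)) {
		ret = -ERESTARTSYS;
		goto out_unlocked;	/* nothing locked yet */
	}
	if (!attached) {
		ret = -ENODEV;
		goto out_locked;	/* must drop the mutex */
	}
	/* ... actual work ... */
out_locked:
	mutex_unlock(&lock);
out_unlocked:
	kfree(buf);
	return ret;

It also fixes a real bug visible in the hunk: the old code counted a chunk as written even when copy_to_user() failed.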
+109 -65	drivers/media/IR/mceusb.c
···
35
35
#include <linux/device.h>
36
36
#include <linux/module.h>
37
37
#include <linux/slab.h>
38
-
#include <linux/usb.h>
39
38
#include <linux/input.h>
39
+
#include <linux/usb.h>
40
+
#include <linux/usb/input.h>
40
41
#include <media/ir-core.h>
41
-
#include <media/ir-common.h>
42
42
43
43
#define DRIVER_VERSION "1.91"
44
44
#define DRIVER_AUTHOR "Jarod Wilson <jarod@wilsonet.com>"
···
49
49
#define USB_BUFLEN 32 /* USB reception buffer length */
50
50
#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
51
51
#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
52
+
#define MS_TO_NS(msec) ((msec) * 1000)
52
53
53
54
/* MCE constants */
54
55
#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
···
75
74
#define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */
76
75
77
76
/* Sub-commands, which follow MCE_COMMAND_HEADER or MCE_HW_CMD_HEADER */
77
+
#define MCE_CMD_SIG_END 0x01 /* End of signal */
78
78
#define MCE_CMD_PING 0x03 /* Ping device */
79
79
#define MCE_CMD_UNKNOWN 0x04 /* Unknown */
80
80
#define MCE_CMD_UNKNOWN2 0x05 /* Unknown */
···
93
91
#define MCE_CMD_G_TXMASK 0x13 /* Set TX port bitmask */
94
92
#define MCE_CMD_S_RXSENSOR 0x14 /* Set RX sensor (std/learning) */
95
93
#define MCE_CMD_G_RXSENSOR 0x15 /* Get RX sensor (std/learning) */
94
+
#define MCE_RSP_PULSE_COUNT 0x15 /* RX pulse count (only if learning) */
96
95
#define MCE_CMD_TX_PORTS 0x16 /* Get number of TX ports */
97
96
#define MCE_CMD_G_WAKESRC 0x17 /* Get wake source */
98
97
#define MCE_CMD_UNKNOWN7 0x18 /* Unknown */
···
149
146
MCE_GEN3,
150
147
MCE_GEN2_TX_INV,
151
148
POLARIS_EVK,
149
+
CX_HYBRID_TV,
152
150
};
153
151
154
152
struct mceusb_model {
155
153
u32 mce_gen1:1;
156
154
u32 mce_gen2:1;
157
155
u32 mce_gen3:1;
158
-
u32 tx_mask_inverted:1;
156
+
u32 tx_mask_normal:1;
159
157
u32 is_polaris:1;
158
+
u32 no_tx:1;
160
159
161
160
const char *rc_map; /* Allow specify a per-board map */
162
161
const char *name; /* per-board name */
···
167
162
static const struct mceusb_model mceusb_model[] = {
168
163
[MCE_GEN1] = {
169
164
.mce_gen1 = 1,
170
-
.tx_mask_inverted = 1,
165
+
.tx_mask_normal = 1,
171
166
},
172
167
[MCE_GEN2] = {
173
168
.mce_gen2 = 1,
174
169
},
175
170
[MCE_GEN2_TX_INV] = {
176
171
.mce_gen2 = 1,
177
-
.tx_mask_inverted = 1,
172
+
.tx_mask_normal = 1,
178
173
},
179
174
[MCE_GEN3] = {
180
175
.mce_gen3 = 1,
181
-
.tx_mask_inverted = 1,
176
+
.tx_mask_normal = 1,
182
177
},
183
178
[POLARIS_EVK] = {
184
179
.is_polaris = 1,
···
188
183
* to allow testing it
189
184
*/
190
185
.rc_map = RC_MAP_RC5_HAUPPAUGE_NEW,
191
-
.name = "cx231xx MCE IR",
186
+
.name = "Conexant Hybrid TV (cx231xx) MCE IR",
187
+
},
188
+
[CX_HYBRID_TV] = {
189
+
.is_polaris = 1,
190
+
.no_tx = 1, /* tx isn't wired up at all */
191
+
.name = "Conexant Hybrid TV (cx231xx) MCE IR",
192
192
},
193
193
};
194
194
···
283
273
{ USB_DEVICE(VENDOR_FORMOSA, 0xe03c) },
284
274
/* Formosa Industrial Computing */
285
275
{ USB_DEVICE(VENDOR_FORMOSA, 0xe03e) },
276
+
/* Fintek eHome Infrared Transceiver (HP branded) */
277
+
{ USB_DEVICE(VENDOR_FINTEK, 0x5168) },
286
278
/* Fintek eHome Infrared Transceiver */
287
279
{ USB_DEVICE(VENDOR_FINTEK, 0x0602) },
288
280
/* Fintek eHome Infrared Transceiver (in the AOpen MP45) */
···
304
292
{ USB_DEVICE(VENDOR_NORTHSTAR, 0xe004) },
305
293
/* TiVo PC IR Receiver */
306
294
{ USB_DEVICE(VENDOR_TIVO, 0x2000) },
307
-
/* Conexant SDK */
295
+
/* Conexant Hybrid TV "Shelby" Polaris SDK */
308
296
{ USB_DEVICE(VENDOR_CONEXANT, 0x58a1),
309
297
.driver_info = POLARIS_EVK },
298
+
/* Conexant Hybrid TV RDU253S Polaris */
299
+
{ USB_DEVICE(VENDOR_CONEXANT, 0x58a5),
300
+
.driver_info = CX_HYBRID_TV },
310
301
/* Terminating entry */
311
302
{ }
312
303
};
···
318
303
struct mceusb_dev {
319
304
/* ir-core bits */
320
305
struct ir_dev_props *props;
321
-
struct ir_raw_event rawir;
306
+
307
+
/* optional features we can enable */
308
+
bool carrier_report_enabled;
309
+
bool learning_enabled;
322
310
323
311
/* core device bits */
324
312
struct device *dev;
···
336
318
/* buffers and dma */
337
319
unsigned char *buf_in;
338
320
unsigned int len_in;
321
+
dma_addr_t dma_in;
322
+
dma_addr_t dma_out;
339
323
340
324
enum {
341
325
CMD_HEADER = 0,
···
345
325
CMD_DATA,
346
326
PARSE_IRDATA,
347
327
} parser_state;
348
-
u8 cmd, rem; /* Remaining IR data bytes in packet */
349
328
350
-
dma_addr_t dma_in;
351
-
dma_addr_t dma_out;
329
+
u8 cmd, rem; /* Remaining IR data bytes in packet */
352
330
353
331
struct {
354
332
u32 connected:1;
355
-
u32 tx_mask_inverted:1;
333
+
u32 tx_mask_normal:1;
356
334
u32 microsoft_gen1:1;
335
+
u32 no_tx:1;
357
336
} flags;
358
337
359
338
/* transmit support */
···
427
408
case MCE_CMD_UNKNOWN:
428
409
case MCE_CMD_S_CARRIER:
429
410
case MCE_CMD_S_TIMEOUT:
430
-
case MCE_CMD_G_RXSENSOR:
411
+
case MCE_RSP_PULSE_COUNT:
431
412
datasize = 2;
432
413
break;
414
+
case MCE_CMD_SIG_END:
433
415
case MCE_CMD_S_TXMASK:
434
416
case MCE_CMD_S_RXSENSOR:
435
417
datasize = 1;
···
453
433
return;
454
434
455
435
/* skip meaningless 0xb1 0x60 header bytes on orig receiver */
456
-
if (ir->flags.microsoft_gen1 && !out)
436
+
if (ir->flags.microsoft_gen1 && !out && !offset)
457
437
skip = 2;
458
438
459
439
if (len <= skip)
···
511
491
break;
512
492
case MCE_COMMAND_HEADER:
513
493
switch (subcmd) {
494
+
case MCE_CMD_SIG_END:
495
+
dev_info(dev, "End of signal\n");
496
+
break;
514
497
case MCE_CMD_PING:
515
498
dev_info(dev, "Ping\n");
516
499
break;
···
548
525
inout, data1 == 0x02 ? "short" : "long");
549
526
break;
550
527
case MCE_CMD_G_RXSENSOR:
551
-
if (len == 2)
528
+
/* aka MCE_RSP_PULSE_COUNT */
529
+
if (out)
552
530
dev_info(dev, "Get receive sensor\n");
553
-
else
554
-
dev_info(dev, "Received pulse count is %d\n",
531
+
else if (ir->learning_enabled)
532
+
dev_info(dev, "RX pulse count: %d\n",
555
533
((data1 << 8) | data2));
556
534
break;
557
535
case MCE_RSP_CMD_INVALID:
···
748
724
return ret ? ret : n;
749
725
}
750
726
751
-
/* Sets active IR outputs -- mce devices typically (all?) have two */
727
+
/* Sets active IR outputs -- mce devices typically have two */
752
728
static int mceusb_set_tx_mask(void *priv, u32 mask)
753
729
{
754
730
struct mceusb_dev *ir = priv;
755
731
756
-
if (ir->flags.tx_mask_inverted)
732
+
if (ir->flags.tx_mask_normal)
733
+
ir->tx_mask = mask;
734
+
else
757
735
ir->tx_mask = (mask != MCE_DEFAULT_TX_MASK ?
758
736
mask ^ MCE_DEFAULT_TX_MASK : mask) << 1;
759
-
else
760
-
ir->tx_mask = mask;
761
737
762
738
return 0;
763
739
}
···
776
752
777
753
if (carrier == 0) {
778
754
ir->carrier = carrier;
779
-
cmdbuf[2] = 0x01;
755
+
cmdbuf[2] = MCE_CMD_SIG_END;
780
756
cmdbuf[3] = MCE_IRDATA_TRAILER;
781
757
dev_dbg(ir->dev, "%s: disabling carrier "
782
758
"modulation\n", __func__);
···
806
782
return carrier;
807
783
}
808
784
785
+
/*
786
+
* We don't do anything but print debug spew for many of the command bits
787
+
* we receive from the hardware, but some of them are useful information
788
+
* we want to store so that we can use them.
789
+
*/
790
+
static void mceusb_handle_command(struct mceusb_dev *ir, int index)
791
+
{
792
+
u8 hi = ir->buf_in[index + 1] & 0xff;
793
+
u8 lo = ir->buf_in[index + 2] & 0xff;
794
+
795
+
switch (ir->buf_in[index]) {
796
+
/* 2-byte return value commands */
797
+
case MCE_CMD_S_TIMEOUT:
798
+
ir->props->timeout = MS_TO_NS((hi << 8 | lo) / 2);
799
+
break;
800
+
801
+
/* 1-byte return value commands */
802
+
case MCE_CMD_S_TXMASK:
803
+
ir->tx_mask = hi;
804
+
break;
805
+
case MCE_CMD_S_RXSENSOR:
806
+
ir->learning_enabled = (hi == 0x02);
807
+
break;
808
+
default:
809
+
break;
810
+
}
811
+
}
812
+
809
813
static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
810
814
{
811
815
DEFINE_IR_RAW_EVENT(rawir);
···
843
791
if (ir->flags.microsoft_gen1)
844
792
i = 2;
845
793
794
+
/* if there's no data, just return now */
795
+
if (buf_len <= i)
796
+
return;
797
+
846
798
for (; i < buf_len; i++) {
847
799
switch (ir->parser_state) {
848
800
case SUBCMD:
849
801
ir->rem = mceusb_cmdsize(ir->cmd, ir->buf_in[i]);
850
802
mceusb_dev_printdata(ir, ir->buf_in, i - 1,
851
803
ir->rem + 2, false);
804
+
mceusb_handle_command(ir, i);
852
805
ir->parser_state = CMD_DATA;
853
806
break;
854
807
case PARSE_IRDATA:
855
808
ir->rem--;
856
809
rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
857
810
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
858
-
* MCE_TIME_UNIT * 1000;
859
-
860
-
if ((ir->buf_in[i] & MCE_PULSE_MASK) == 0x7f) {
861
-
if (ir->rawir.pulse == rawir.pulse) {
862
-
ir->rawir.duration += rawir.duration;
863
-
} else {
864
-
ir->rawir.duration = rawir.duration;
865
-
ir->rawir.pulse = rawir.pulse;
866
-
}
867
-
if (ir->rem)
868
-
break;
869
-
}
870
-
rawir.duration += ir->rawir.duration;
871
-
ir->rawir.duration = 0;
872
-
ir->rawir.pulse = rawir.pulse;
811
+
* MS_TO_NS(MCE_TIME_UNIT);
873
812
874
813
dev_dbg(ir->dev, "Storing %s with duration %d\n",
875
814
rawir.pulse ? "pulse" : "space",
876
815
rawir.duration);
877
816
878
-
ir_raw_event_store(ir->idev, &rawir);
817
+
ir_raw_event_store_with_filter(ir->idev, &rawir);
879
818
break;
880
819
case CMD_DATA:
881
820
ir->rem--;
···
882
839
continue;
883
840
}
884
841
ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
885
-
mceusb_dev_printdata(ir, ir->buf_in, i, ir->rem + 1, false);
886
-
if (ir->rem) {
842
+
mceusb_dev_printdata(ir, ir->buf_in,
843
+
i, ir->rem + 1, false);
844
+
if (ir->rem)
887
845
ir->parser_state = PARSE_IRDATA;
888
-
break;
889
-
}
890
-
/*
891
-
* a package with len=0 (e. g. 0x80) means end of
892
-
* data. We could use it to do the call to
893
-
* ir_raw_event_handle(). For now, we don't need to
894
-
* use it.
895
-
*/
896
846
break;
897
847
}
898
848
···
1020
984
mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
1021
985
mce_sync_in(ir, NULL, maxp);
1022
986
1023
-
/* get the transmitter bitmask */
1024
-
mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
1025
-
mce_sync_in(ir, NULL, maxp);
987
+
if (!ir->flags.no_tx) {
988
+
/* get the transmitter bitmask */
989
+
mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
990
+
mce_sync_in(ir, NULL, maxp);
991
+
}
1026
992
1027
993
/* get receiver timeout value */
1028
994
mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
···
1073
1035
props->priv = ir;
1074
1036
props->driver_type = RC_DRIVER_IR_RAW;
1075
1037
props->allowed_protos = IR_TYPE_ALL;
1076
-
props->s_tx_mask = mceusb_set_tx_mask;
1077
-
props->s_tx_carrier = mceusb_set_tx_carrier;
1078
-
props->tx_ir = mceusb_tx_ir;
1038
+
props->timeout = MS_TO_NS(1000);
1039
+
if (!ir->flags.no_tx) {
1040
+
props->s_tx_mask = mceusb_set_tx_mask;
1041
+
props->s_tx_carrier = mceusb_set_tx_carrier;
1042
+
props->tx_ir = mceusb_tx_ir;
1043
+
}
1079
1044
1080
1045
ir->props = props;
1046
+
1047
+
usb_to_input_id(ir->usbdev, &idev->id);
1048
+
idev->dev.parent = ir->dev;
1081
1049
1082
1050
if (mceusb_model[ir->model].rc_map)
1083
1051
rc_map = mceusb_model[ir->model].rc_map;
···
1118
1074
enum mceusb_model_type model = id->driver_info;
1119
1075
bool is_gen3;
1120
1076
bool is_microsoft_gen1;
1121
-
bool tx_mask_inverted;
1077
+
bool tx_mask_normal;
1122
1078
bool is_polaris;
1123
1079
1124
-
dev_dbg(&intf->dev, ": %s called\n", __func__);
1080
+
dev_dbg(&intf->dev, "%s called\n", __func__);
1125
1081
1126
1082
idesc = intf->cur_altsetting;
1127
1083
1128
1084
is_gen3 = mceusb_model[model].mce_gen3;
1129
1085
is_microsoft_gen1 = mceusb_model[model].mce_gen1;
1130
-
tx_mask_inverted = mceusb_model[model].tx_mask_inverted;
1086
+
tx_mask_normal = mceusb_model[model].tx_mask_normal;
1131
1087
is_polaris = mceusb_model[model].is_polaris;
1132
1088
1133
1089
if (is_polaris) {
···
1151
1107
ep_in = ep;
1152
1108
ep_in->bmAttributes = USB_ENDPOINT_XFER_INT;
1153
1109
ep_in->bInterval = 1;
1154
-
dev_dbg(&intf->dev, ": acceptable inbound endpoint "
1110
+
dev_dbg(&intf->dev, "acceptable inbound endpoint "
1155
1111
"found\n");
1156
1112
}
1157
1113
···
1166
1122
ep_out = ep;
1167
1123
ep_out->bmAttributes = USB_ENDPOINT_XFER_INT;
1168
1124
ep_out->bInterval = 1;
1169
-
dev_dbg(&intf->dev, ": acceptable outbound endpoint "
1125
+
dev_dbg(&intf->dev, "acceptable outbound endpoint "
1170
1126
"found\n");
1171
1127
}
1172
1128
}
1173
1129
if (ep_in == NULL) {
1174
-
dev_dbg(&intf->dev, ": inbound and/or endpoint not found\n");
1130
+
dev_dbg(&intf->dev, "inbound and/or endpoint not found\n");
1175
1131
return -ENODEV;
1176
1132
}
1177
1133
···
1194
1150
ir->dev = &intf->dev;
1195
1151
ir->len_in = maxp;
1196
1152
ir->flags.microsoft_gen1 = is_microsoft_gen1;
1197
-
ir->flags.tx_mask_inverted = tx_mask_inverted;
1153
+
ir->flags.tx_mask_normal = tx_mask_normal;
1154
+
ir->flags.no_tx = mceusb_model[model].no_tx;
1198
1155
ir->model = model;
1199
-
1200
-
init_ir_raw_event(&ir->rawir);
1201
1156
1202
1157
/* Saving usb interface data for use by the transmitter routine */
1203
1158
ir->usb_ep_in = ep_in;
···
1234
1191
1235
1192
mceusb_get_parameters(ir);
1236
1193
1237
-
mceusb_set_tx_mask(ir, MCE_DEFAULT_TX_MASK);
1194
+
if (!ir->flags.no_tx)
1195
+
mceusb_set_tx_mask(ir, MCE_DEFAULT_TX_MASK);
1238
1196
1239
1197
usb_set_intfdata(intf, ir);
1240
1198
+8
-2
drivers/media/IR/nuvoton-cir.c
+8
-2
drivers/media/IR/nuvoton-cir.c
···
603
603
count = nvt->pkts;
604
604
nvt_dbg_verbose("Processing buffer of len %d", count);
605
605
606
+
init_ir_raw_event(&rawir);
607
+
606
608
for (i = 0; i < count; i++) {
607
609
nvt->pkts--;
608
610
sample = nvt->buf[i];
···
645
643
* indicates end of IR signal, but new data incoming. In both
646
644
* cases, it means we're ready to call ir_raw_event_handle
647
645
*/
648
-
if (sample == BUF_PULSE_BIT || ((sample != BUF_LEN_MASK) &&
649
-
(sample & BUF_REPEAT_MASK) == BUF_REPEAT_BYTE))
646
+
if ((sample == BUF_PULSE_BIT) && nvt->pkts) {
647
+
nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
650
648
ir_raw_event_handle(nvt->rdev);
649
+
}
651
650
}
651
+
652
+
nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
653
+
ir_raw_event_handle(nvt->rdev);
652
654
653
655
if (nvt->pkts) {
654
656
nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
+12
-9
drivers/media/IR/streamzap.c
+12
-9
drivers/media/IR/streamzap.c
···
34
34
#include <linux/device.h>
35
35
#include <linux/module.h>
36
36
#include <linux/slab.h>
37
-
#include <linux/usb.h>
38
37
#include <linux/input.h>
38
+
#include <linux/usb.h>
39
+
#include <linux/usb/input.h>
39
40
#include <media/ir-core.h>
40
41
41
42
#define DRIVER_VERSION "1.61"
···
141
140
142
141
static void sz_push(struct streamzap_ir *sz, struct ir_raw_event rawir)
143
142
{
144
-
ir_raw_event_store(sz->idev, &rawir);
143
+
dev_dbg(sz->dev, "Storing %s with duration %u us\n",
144
+
(rawir.pulse ? "pulse" : "space"), rawir.duration);
145
+
ir_raw_event_store_with_filter(sz->idev, &rawir);
145
146
}
146
147
147
148
static void sz_push_full_pulse(struct streamzap_ir *sz,
···
170
167
rawir.duration *= 1000;
171
168
rawir.duration &= IR_MAX_DURATION;
172
169
}
173
-
dev_dbg(sz->dev, "ls %u\n", rawir.duration);
174
170
sz_push(sz, rawir);
175
171
176
172
sz->idle = false;
···
182
180
sz->sum += rawir.duration;
183
181
rawir.duration *= 1000;
184
182
rawir.duration &= IR_MAX_DURATION;
185
-
dev_dbg(sz->dev, "p %u\n", rawir.duration);
186
183
sz_push(sz, rawir);
187
184
}
188
185
···
201
200
rawir.duration += SZ_RESOLUTION / 2;
202
201
sz->sum += rawir.duration;
203
202
rawir.duration *= 1000;
204
-
dev_dbg(sz->dev, "s %u\n", rawir.duration);
205
203
sz_push(sz, rawir);
206
204
}
207
205
···
221
221
struct streamzap_ir *sz;
222
222
unsigned int i;
223
223
int len;
224
-
static int timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) &
225
-
IR_MAX_DURATION) | 0x03000000);
226
224
227
225
if (!urb)
228
226
return;
···
244
246
245
247
dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len);
246
248
for (i = 0; i < len; i++) {
247
-
dev_dbg(sz->dev, "sz idx %d: %x\n",
249
+
dev_dbg(sz->dev, "sz->buf_in[%d]: %x\n",
248
250
i, (unsigned char)sz->buf_in[i]);
249
251
switch (sz->decoder_state) {
250
252
case PulseSpace:
···
271
273
DEFINE_IR_RAW_EVENT(rawir);
272
274
273
275
rawir.pulse = false;
274
-
rawir.duration = timeout;
276
+
rawir.duration = sz->props->timeout;
275
277
sz->idle = true;
276
278
if (sz->timeout_enabled)
277
279
sz_push(sz, rawir);
···
332
334
props->allowed_protos = IR_TYPE_ALL;
333
335
334
336
sz->props = props;
337
+
338
+
usb_to_input_id(sz->usbdev, &idev->id);
339
+
idev->dev.parent = sz->dev;
335
340
336
341
ret = ir_input_register(idev, RC_MAP_STREAMZAP, props, DRIVER_NAME);
337
342
if (ret < 0) {
···
445
444
sz->decoder_state = PulseSpace;
446
445
/* FIXME: don't yet have a way to set this */
447
446
sz->timeout_enabled = true;
447
+
sz->props->timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) &
448
+
IR_MAX_DURATION) | 0x03000000);
448
449
#if 0
449
450
/* not yet supported, depends on patches from maxim */
450
451
/* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
+4
-4
drivers/media/common/saa7146_hlp.c
+4
-4
drivers/media/common/saa7146_hlp.c
···
558
558
static void saa7146_set_position(struct saa7146_dev *dev, int w_x, int w_y, int w_height, enum v4l2_field field, u32 pixelformat)
559
559
{
560
560
struct saa7146_vv *vv = dev->vv_data;
561
-
struct saa7146_format *sfmt = format_by_fourcc(dev, pixelformat);
561
+
struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pixelformat);
562
562
563
563
int b_depth = vv->ov_fmt->depth;
564
564
int b_bpl = vv->ov_fb.fmt.bytesperline;
···
702
702
struct saa7146_vv *vv = dev->vv_data;
703
703
struct saa7146_video_dma vdma1;
704
704
705
-
struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
705
+
struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
706
706
707
707
int width = buf->fmt->width;
708
708
int height = buf->fmt->height;
···
827
827
struct saa7146_video_dma vdma2;
828
828
struct saa7146_video_dma vdma3;
829
829
830
-
struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
830
+
struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
831
831
832
832
int width = buf->fmt->width;
833
833
int height = buf->fmt->height;
···
994
994
995
995
void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next)
996
996
{
997
-
struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
997
+
struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
998
998
struct saa7146_vv *vv = dev->vv_data;
999
999
u32 vdma1_prot_addr;
1000
1000
+8
-8
drivers/media/common/saa7146_video.c
+8
-8
drivers/media/common/saa7146_video.c
···
84
84
85
85
static int NUM_FORMATS = sizeof(formats)/sizeof(struct saa7146_format);
86
86
87
-
struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc)
87
+
struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc)
88
88
{
89
89
int i, j = NUM_FORMATS;
90
90
···
266
266
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
267
267
struct scatterlist *list = dma->sglist;
268
268
int length = dma->sglen;
269
-
struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
269
+
struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
270
270
271
271
DEB_EE(("dev:%p, buf:%p, sg_len:%d\n",dev,buf,length));
272
272
···
408
408
}
409
409
}
410
410
411
-
fmt = format_by_fourcc(dev,fh->video_fmt.pixelformat);
411
+
fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
412
412
/* we need to have a valid format set here */
413
413
BUG_ON(NULL == fmt);
414
414
···
460
460
return -EBUSY;
461
461
}
462
462
463
-
fmt = format_by_fourcc(dev,fh->video_fmt.pixelformat);
463
+
fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
464
464
/* we need to have a valid format set here */
465
465
BUG_ON(NULL == fmt);
466
466
···
536
536
return -EPERM;
537
537
538
538
/* check args */
539
-
fmt = format_by_fourcc(dev, fb->fmt.pixelformat);
539
+
fmt = saa7146_format_by_fourcc(dev, fb->fmt.pixelformat);
540
540
if (NULL == fmt)
541
541
return -EINVAL;
542
542
···
760
760
761
761
DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh));
762
762
763
-
fmt = format_by_fourcc(dev, f->fmt.pix.pixelformat);
763
+
fmt = saa7146_format_by_fourcc(dev, f->fmt.pix.pixelformat);
764
764
if (NULL == fmt)
765
765
return -EINVAL;
766
766
···
1264
1264
buf->fmt = &fh->video_fmt;
1265
1265
buf->vb.field = fh->video_fmt.field;
1266
1266
1267
-
sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
1267
+
sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
1268
1268
1269
1269
release_all_pagetables(dev, buf);
1270
1270
if( 0 != IS_PLANAR(sfmt->trans)) {
···
1378
1378
fh->video_fmt.pixelformat = V4L2_PIX_FMT_BGR24;
1379
1379
fh->video_fmt.bytesperline = 0;
1380
1380
fh->video_fmt.field = V4L2_FIELD_ANY;
1381
-
sfmt = format_by_fourcc(dev,fh->video_fmt.pixelformat);
1381
+
sfmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
1382
1382
fh->video_fmt.sizeimage = (fh->video_fmt.width * fh->video_fmt.height * sfmt->depth)/8;
1383
1383
1384
1384
videobuf_queue_sg_init(&fh->video_q, &video_qops,
+3
-114
drivers/media/video/bt8xx/bttv-driver.c
+3
-114
drivers/media/video/bt8xx/bttv-driver.c
···
854
854
xbits |= RESOURCE_VIDEO_READ | RESOURCE_VIDEO_STREAM;
855
855
856
856
/* is it free? */
857
-
mutex_lock(&btv->lock);
858
857
if (btv->resources & xbits) {
859
858
/* no, someone else uses it */
860
859
goto fail;
···
883
884
/* it's free, grab it */
884
885
fh->resources |= bit;
885
886
btv->resources |= bit;
886
-
mutex_unlock(&btv->lock);
887
887
return 1;
888
888
889
889
fail:
890
-
mutex_unlock(&btv->lock);
891
890
return 0;
892
891
}
893
892
···
937
940
/* trying to free ressources not allocated by us ... */
938
941
printk("bttv: BUG! (btres)\n");
939
942
}
940
-
mutex_lock(&btv->lock);
941
943
fh->resources &= ~bits;
942
944
btv->resources &= ~bits;
943
945
···
947
951
948
952
if (0 == (bits & VBI_RESOURCES))
949
953
disclaim_vbi_lines(btv);
950
-
951
-
mutex_unlock(&btv->lock);
952
954
}
953
955
954
956
/* ----------------------------------------------------------------------- */
···
1707
1713
1708
1714
/* Make sure tvnorm and vbi_end remain consistent
1709
1715
until we're done. */
1710
-
mutex_lock(&btv->lock);
1711
1716
1712
1717
norm = btv->tvnorm;
1713
1718
1714
1719
/* In this mode capturing always starts at defrect.top
1715
1720
(default VDELAY), ignoring cropping parameters. */
1716
1721
if (btv->vbi_end > bttv_tvnorms[norm].cropcap.defrect.top) {
1717
-
mutex_unlock(&btv->lock);
1718
1722
return -EINVAL;
1719
1723
}
1720
1724
1721
-
mutex_unlock(&btv->lock);
1722
-
1723
1725
c.rect = bttv_tvnorms[norm].cropcap.defrect;
1724
1726
} else {
1725
-
mutex_lock(&btv->lock);
1726
-
1727
1727
norm = btv->tvnorm;
1728
1728
c = btv->crop[!!fh->do_crop];
1729
-
1730
-
mutex_unlock(&btv->lock);
1731
1729
1732
1730
if (width < c.min_scaled_width ||
1733
1731
width > c.max_scaled_width ||
···
1844
1858
unsigned int i;
1845
1859
int err;
1846
1860
1847
-
mutex_lock(&btv->lock);
1848
1861
err = v4l2_prio_check(&btv->prio, fh->prio);
1849
1862
if (err)
1850
1863
goto err;
···
1859
1874
set_tvnorm(btv, i);
1860
1875
1861
1876
err:
1862
-
mutex_unlock(&btv->lock);
1863
1877
1864
1878
return err;
1865
1879
}
···
1882
1898
struct bttv *btv = fh->btv;
1883
1899
int rc = 0;
1884
1900
1885
-
mutex_lock(&btv->lock);
1886
1901
if (i->index >= bttv_tvcards[btv->c.type].video_inputs) {
1887
1902
rc = -EINVAL;
1888
1903
goto err;
···
1911
1928
i->std = BTTV_NORMS;
1912
1929
1913
1930
err:
1914
-
mutex_unlock(&btv->lock);
1915
1931
1916
1932
return rc;
1917
1933
}
···
1920
1938
struct bttv_fh *fh = priv;
1921
1939
struct bttv *btv = fh->btv;
1922
1940
1923
-
mutex_lock(&btv->lock);
1924
1941
*i = btv->input;
1925
-
mutex_unlock(&btv->lock);
1926
1942
1927
1943
return 0;
1928
1944
}
···
1932
1952
1933
1953
int err;
1934
1954
1935
-
mutex_lock(&btv->lock);
1936
1955
err = v4l2_prio_check(&btv->prio, fh->prio);
1937
1956
if (unlikely(err))
1938
1957
goto err;
···
1944
1965
set_input(btv, i, btv->tvnorm);
1945
1966
1946
1967
err:
1947
-
mutex_unlock(&btv->lock);
1948
1968
return 0;
1949
1969
}
1950
1970
···
1957
1979
if (unlikely(0 != t->index))
1958
1980
return -EINVAL;
1959
1981
1960
-
mutex_lock(&btv->lock);
1961
1982
if (unlikely(btv->tuner_type == TUNER_ABSENT)) {
1962
1983
err = -EINVAL;
1963
1984
goto err;
···
1972
1995
btv->audio_mode_gpio(btv, t, 1);
1973
1996
1974
1997
err:
1975
-
mutex_unlock(&btv->lock);
1976
1998
1977
1999
return 0;
1978
2000
}
···
1982
2006
struct bttv_fh *fh = priv;
1983
2007
struct bttv *btv = fh->btv;
1984
2008
1985
-
mutex_lock(&btv->lock);
1986
2009
f->type = btv->radio_user ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
1987
2010
f->frequency = btv->freq;
1988
-
mutex_unlock(&btv->lock);
1989
2011
1990
2012
return 0;
1991
2013
}
···
1998
2024
if (unlikely(f->tuner != 0))
1999
2025
return -EINVAL;
2000
2026
2001
-
mutex_lock(&btv->lock);
2002
2027
err = v4l2_prio_check(&btv->prio, fh->prio);
2003
2028
if (unlikely(err))
2004
2029
goto err;
···
2012
2039
if (btv->has_matchbox && btv->radio_user)
2013
2040
tea5757_set_freq(btv, btv->freq);
2014
2041
err:
2015
-
mutex_unlock(&btv->lock);
2016
2042
2017
2043
return 0;
2018
2044
}
···
2144
2172
2145
2173
/* Make sure tvnorm, vbi_end and the current cropping parameters
2146
2174
remain consistent until we're done. */
2147
-
mutex_lock(&btv->lock);
2148
2175
2149
2176
b = &bttv_tvnorms[btv->tvnorm].cropcap.bounds;
2150
2177
···
2221
2250
rc = 0; /* success */
2222
2251
2223
2252
fail:
2224
-
mutex_unlock(&btv->lock);
2225
2253
2226
2254
return rc;
2227
2255
}
···
2252
2282
if (V4L2_FIELD_ANY == field) {
2253
2283
__s32 height2;
2254
2284
2255
-
mutex_lock(&fh->btv->lock);
2256
2285
height2 = fh->btv->crop[!!fh->do_crop].rect.height >> 1;
2257
-
mutex_unlock(&fh->btv->lock);
2258
2286
field = (win->w.height > height2)
2259
2287
? V4L2_FIELD_INTERLACED
2260
2288
: V4L2_FIELD_TOP;
···
2328
2360
}
2329
2361
}
2330
2362
2331
-
mutex_lock(&fh->cap.vb_lock);
2332
2363
/* clip against screen */
2333
2364
if (NULL != btv->fbuf.base)
2334
2365
n = btcx_screen_clips(btv->fbuf.fmt.width, btv->fbuf.fmt.height,
···
2358
2391
fh->ov.field = win->field;
2359
2392
fh->ov.setup_ok = 1;
2360
2393
2361
-
/*
2362
-
* FIXME: btv is protected by btv->lock mutex, while btv->init
2363
-
* is protected by fh->cap.vb_lock. This seems to open the
2364
-
* possibility for some race situations. Maybe the better would
2365
-
* be to unify those locks or to use another way to store the
2366
-
* init values that will be consumed by videobuf callbacks
2367
-
*/
2368
2394
btv->init.ov.w.width = win->w.width;
2369
2395
btv->init.ov.w.height = win->w.height;
2370
2396
btv->init.ov.field = win->field;
···
2372
2412
bttv_overlay_risc(btv, &fh->ov, fh->ovfmt, new);
2373
2413
retval = bttv_switch_overlay(btv,fh,new);
2374
2414
}
2375
-
mutex_unlock(&fh->cap.vb_lock);
2376
2415
return retval;
2377
2416
}
2378
2417
···
2485
2526
if (V4L2_FIELD_ANY == field) {
2486
2527
__s32 height2;
2487
2528
2488
-
mutex_lock(&btv->lock);
2489
2529
height2 = btv->crop[!!fh->do_crop].rect.height >> 1;
2490
-
mutex_unlock(&btv->lock);
2491
2530
field = (f->fmt.pix.height > height2)
2492
2531
? V4L2_FIELD_INTERLACED
2493
2532
: V4L2_FIELD_BOTTOM;
···
2571
2614
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
2572
2615
2573
2616
/* update our state informations */
2574
-
mutex_lock(&fh->cap.vb_lock);
2575
2617
fh->fmt = fmt;
2576
2618
fh->cap.field = f->fmt.pix.field;
2577
2619
fh->cap.last = V4L2_FIELD_NONE;
···
2579
2623
btv->init.fmt = fmt;
2580
2624
btv->init.width = f->fmt.pix.width;
2581
2625
btv->init.height = f->fmt.pix.height;
2582
-
mutex_unlock(&fh->cap.vb_lock);
2583
2626
2584
2627
return 0;
2585
2628
}
···
2604
2649
unsigned int i;
2605
2650
struct bttv_fh *fh = priv;
2606
2651
2607
-
mutex_lock(&fh->cap.vb_lock);
2608
2652
retval = __videobuf_mmap_setup(&fh->cap, gbuffers, gbufsize,
2609
2653
V4L2_MEMORY_MMAP);
2610
2654
if (retval < 0) {
2611
-
mutex_unlock(&fh->cap.vb_lock);
2612
2655
return retval;
2613
2656
}
2614
2657
···
2618
2665
for (i = 0; i < gbuffers; i++)
2619
2666
mbuf->offsets[i] = i * gbufsize;
2620
2667
2621
-
mutex_unlock(&fh->cap.vb_lock);
2622
2668
return 0;
2623
2669
}
2624
2670
#endif
···
2727
2775
int retval = 0;
2728
2776
2729
2777
if (on) {
2730
-
mutex_lock(&fh->cap.vb_lock);
2731
2778
/* verify args */
2732
2779
if (unlikely(!btv->fbuf.base)) {
2733
-
mutex_unlock(&fh->cap.vb_lock);
2734
2780
return -EINVAL;
2735
2781
}
2736
2782
if (unlikely(!fh->ov.setup_ok)) {
···
2737
2787
}
2738
2788
if (retval)
2739
2789
return retval;
2740
-
mutex_unlock(&fh->cap.vb_lock);
2741
2790
}
2742
2791
2743
2792
if (!check_alloc_btres_lock(btv, fh, RESOURCE_OVERLAY))
2744
2793
return -EBUSY;
2745
2794
2746
-
mutex_lock(&fh->cap.vb_lock);
2747
2795
if (on) {
2748
2796
fh->ov.tvnorm = btv->tvnorm;
2749
2797
new = videobuf_sg_alloc(sizeof(*new));
···
2753
2805
2754
2806
/* switch over */
2755
2807
retval = bttv_switch_overlay(btv, fh, new);
2756
-
mutex_unlock(&fh->cap.vb_lock);
2757
2808
return retval;
2758
2809
}
2759
2810
···
2791
2844
}
2792
2845
2793
2846
/* ok, accept it */
2794
-
mutex_lock(&fh->cap.vb_lock);
2795
2847
btv->fbuf.base = fb->base;
2796
2848
btv->fbuf.fmt.width = fb->fmt.width;
2797
2849
btv->fbuf.fmt.height = fb->fmt.height;
···
2822
2876
retval = bttv_switch_overlay(btv, fh, new);
2823
2877
}
2824
2878
}
2825
-
mutex_unlock(&fh->cap.vb_lock);
2826
2879
return retval;
2827
2880
}
2828
2881
···
2900
2955
c->id >= V4L2_CID_PRIVATE_LASTP1))
2901
2956
return -EINVAL;
2902
2957
2903
-
mutex_lock(&btv->lock);
2904
2958
if (!btv->volume_gpio && (c->id == V4L2_CID_AUDIO_VOLUME))
2905
2959
*c = no_ctl;
2906
2960
else {
···
2907
2963
2908
2964
*c = (NULL != ctrl) ? *ctrl : no_ctl;
2909
2965
}
2910
-
mutex_unlock(&btv->lock);
2911
2966
2912
2967
return 0;
2913
2968
}
···
2917
2974
struct bttv_fh *fh = f;
2918
2975
struct bttv *btv = fh->btv;
2919
2976
2920
-
mutex_lock(&btv->lock);
2921
2977
v4l2_video_std_frame_period(bttv_tvnorms[btv->tvnorm].v4l2_id,
2922
2978
&parm->parm.capture.timeperframe);
2923
-
mutex_unlock(&btv->lock);
2924
2979
2925
2980
return 0;
2926
2981
}
···
2934
2993
if (0 != t->index)
2935
2994
return -EINVAL;
2936
2995
2937
-
mutex_lock(&btv->lock);
2938
2996
t->rxsubchans = V4L2_TUNER_SUB_MONO;
2939
2997
bttv_call_all(btv, tuner, g_tuner, t);
2940
2998
strcpy(t->name, "Television");
···
2945
3005
if (btv->audio_mode_gpio)
2946
3006
btv->audio_mode_gpio(btv, t, 0);
2947
3007
2948
-
mutex_unlock(&btv->lock);
2949
3008
return 0;
2950
3009
}
2951
3010
···
2953
3014
struct bttv_fh *fh = f;
2954
3015
struct bttv *btv = fh->btv;
2955
3016
2956
-
mutex_lock(&btv->lock);
2957
3017
*p = v4l2_prio_max(&btv->prio);
2958
-
mutex_unlock(&btv->lock);
2959
3018
2960
3019
return 0;
2961
3020
}
···
2965
3028
struct bttv *btv = fh->btv;
2966
3029
int rc;
2967
3030
2968
-
mutex_lock(&btv->lock);
2969
3031
rc = v4l2_prio_change(&btv->prio, &fh->prio, prio);
2970
-
mutex_unlock(&btv->lock);
2971
3032
2972
3033
return rc;
2973
3034
}
···
2980
3045
cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
2981
3046
return -EINVAL;
2982
3047
2983
-
mutex_lock(&btv->lock);
2984
3048
*cap = bttv_tvnorms[btv->tvnorm].cropcap;
2985
-
mutex_unlock(&btv->lock);
2986
3049
2987
3050
return 0;
2988
3051
}
···
2998
3065
inconsistent with fh->width or fh->height and apps
2999
3066
do not expect a change here. */
3000
3067
3001
-
mutex_lock(&btv->lock);
3002
3068
crop->c = btv->crop[!!fh->do_crop].rect;
3003
-
mutex_unlock(&btv->lock);
3004
3069
3005
3070
return 0;
3006
3071
}
···
3022
3091
/* Make sure tvnorm, vbi_end and the current cropping
3023
3092
parameters remain consistent until we're done. Note
3024
3093
read() may change vbi_end in check_alloc_btres_lock(). */
3025
-
mutex_lock(&btv->lock);
3026
3094
retval = v4l2_prio_check(&btv->prio, fh->prio);
3027
3095
if (0 != retval) {
3028
-
mutex_unlock(&btv->lock);
3029
3096
return retval;
3030
3097
}
3031
3098
3032
3099
retval = -EBUSY;
3033
3100
3034
3101
if (locked_btres(fh->btv, VIDEO_RESOURCES)) {
3035
-
mutex_unlock(&btv->lock);
3036
3102
return retval;
3037
3103
}
3038
3104
···
3041
3113
3042
3114
b_top = max(b->top, btv->vbi_end);
3043
3115
if (b_top + 32 >= b_bottom) {
3044
-
mutex_unlock(&btv->lock);
3045
3116
return retval;
3046
3117
}
3047
3118
···
3063
3136
3064
3137
btv->crop[1] = c;
3065
3138
3066
-
mutex_unlock(&btv->lock);
3067
-
3068
3139
fh->do_crop = 1;
3069
-
3070
-
mutex_lock(&fh->cap.vb_lock);
3071
3140
3072
3141
if (fh->width < c.min_scaled_width) {
3073
3142
fh->width = c.min_scaled_width;
···
3080
3157
fh->height = c.max_scaled_height;
3081
3158
btv->init.height = c.max_scaled_height;
3082
3159
}
3083
-
3084
-
mutex_unlock(&fh->cap.vb_lock);
3085
3160
3086
3161
return 0;
3087
3162
}
···
3148
3227
return videobuf_poll_stream(file, &fh->vbi, wait);
3149
3228
}
3150
3229
3151
-
mutex_lock(&fh->cap.vb_lock);
3152
3230
if (check_btres(fh,RESOURCE_VIDEO_STREAM)) {
3153
3231
/* streaming capture */
3154
3232
if (list_empty(&fh->cap.stream))
···
3182
3262
else
3183
3263
rc = 0;
3184
3264
err:
3185
-
mutex_unlock(&fh->cap.vb_lock);
3186
3265
return rc;
3187
3266
}
3188
3267
···
3212
3293
return -ENOMEM;
3213
3294
file->private_data = fh;
3214
3295
3215
-
/*
3216
-
* btv is protected by btv->lock mutex, while btv->init and other
3217
-
* streaming vars are protected by fh->cap.vb_lock. We need to take
3218
-
* care of both locks to avoid troubles. However, vb_lock is used also
3219
-
* inside videobuf, without calling buf->lock. So, it is a very bad
3220
-
* idea to hold both locks at the same time.
3221
-
* Let's first copy btv->init at fh, holding cap.vb_lock, and then work
3222
-
* with the rest of init, holding btv->lock.
3223
-
*/
3224
-
mutex_lock(&fh->cap.vb_lock);
3225
3296
*fh = btv->init;
3226
-
mutex_unlock(&fh->cap.vb_lock);
3227
3297
3228
3298
fh->type = type;
3229
3299
fh->ov.setup_ok = 0;
3230
3300
3231
-
mutex_lock(&btv->lock);
3232
3301
v4l2_prio_open(&btv->prio, &fh->prio);
3233
3302
3234
3303
videobuf_queue_sg_init(&fh->cap, &bttv_video_qops,
···
3224
3317
V4L2_BUF_TYPE_VIDEO_CAPTURE,
3225
3318
V4L2_FIELD_INTERLACED,
3226
3319
sizeof(struct bttv_buffer),
3227
-
fh, NULL);
3320
+
fh, &btv->lock);
3228
3321
videobuf_queue_sg_init(&fh->vbi, &bttv_vbi_qops,
3229
3322
&btv->c.pci->dev, &btv->s_lock,
3230
3323
V4L2_BUF_TYPE_VBI_CAPTURE,
3231
3324
V4L2_FIELD_SEQ_TB,
3232
3325
sizeof(struct bttv_buffer),
3233
-
fh, NULL);
3326
+
fh, &btv->lock);
3234
3327
set_tvnorm(btv,btv->tvnorm);
3235
3328
set_input(btv, btv->input, btv->tvnorm);
3236
3329
···
3253
3346
bttv_vbi_fmt_reset(&fh->vbi_fmt, btv->tvnorm);
3254
3347
3255
3348
bttv_field_count(btv);
3256
-
mutex_unlock(&btv->lock);
3257
3349
return 0;
3258
3350
}
3259
3351
···
3261
3355
struct bttv_fh *fh = file->private_data;
3262
3356
struct bttv *btv = fh->btv;
3263
3357
3264
-
mutex_lock(&btv->lock);
3265
3358
/* turn off overlay */
3266
3359
if (check_btres(fh, RESOURCE_OVERLAY))
3267
3360
bttv_switch_overlay(btv,fh,NULL);
···
3286
3381
3287
3382
/* free stuff */
3288
3383
3289
-
/*
3290
-
* videobuf uses cap.vb_lock - we should avoid holding btv->lock,
3291
-
* otherwise we may have dead lock conditions
3292
-
*/
3293
-
mutex_unlock(&btv->lock);
3294
3384
videobuf_mmap_free(&fh->cap);
3295
3385
videobuf_mmap_free(&fh->vbi);
3296
-
mutex_lock(&btv->lock);
3297
3386
v4l2_prio_close(&btv->prio, fh->prio);
3298
3387
file->private_data = NULL;
3299
3388
kfree(fh);
···
3297
3398
3298
3399
if (!btv->users)
3299
3400
audio_mute(btv, 1);
3300
-
mutex_unlock(&btv->lock);
3301
3401
3302
3402
return 0;
3303
3403
}
···
3400
3502
if (unlikely(!fh))
3401
3503
return -ENOMEM;
3402
3504
file->private_data = fh;
3403
-
mutex_lock(&fh->cap.vb_lock);
3404
3505
*fh = btv->init;
3405
-
mutex_unlock(&fh->cap.vb_lock);
3406
3506
3407
-
mutex_lock(&btv->lock);
3408
3507
v4l2_prio_open(&btv->prio, &fh->prio);
3409
3508
3410
3509
btv->radio_user++;
···
3409
3514
bttv_call_all(btv, tuner, s_radio);
3410
3515
audio_input(btv,TVAUDIO_INPUT_RADIO);
3411
3516
3412
-
mutex_unlock(&btv->lock);
3413
3517
return 0;
3414
3518
}
3415
3519
···
3418
3524
struct bttv *btv = fh->btv;
3419
3525
struct rds_command cmd;
3420
3526
3421
-
mutex_lock(&btv->lock);
3422
3527
v4l2_prio_close(&btv->prio, fh->prio);
3423
3528
file->private_data = NULL;
3424
3529
kfree(fh);
···
3425
3532
btv->radio_user--;
3426
3533
3427
3534
bttv_call_all(btv, core, ioctl, RDS_CMD_CLOSE, &cmd);
3428
-
mutex_unlock(&btv->lock);
3429
3535
3430
3536
return 0;
3431
3537
}
···
3453
3561
return -EINVAL;
3454
3562
if (0 != t->index)
3455
3563
return -EINVAL;
3456
-
mutex_lock(&btv->lock);
3457
3564
strcpy(t->name, "Radio");
3458
3565
t->type = V4L2_TUNER_RADIO;
3459
3566
···
3460
3569
3461
3570
if (btv->audio_mode_gpio)
3462
3571
btv->audio_mode_gpio(btv, t, 0);
3463
-
3464
-
mutex_unlock(&btv->lock);
3465
3572
3466
3573
return 0;
3467
3574
}
···
3581
3692
.open = radio_open,
3582
3693
.read = radio_read,
3583
3694
.release = radio_release,
3584
-
.ioctl = video_ioctl2,
3695
+
.unlocked_ioctl = video_ioctl2,
3585
3696
.poll = radio_poll,
3586
3697
};
3587
3698
+184 -232 drivers/media/video/gspca/sonixj.c
···
 #define QUALITY_DEF 80
 	u8 jpegqual;			/* webcam quality */

+	u8 reg01;
+	u8 reg17;
 	u8 reg18;
+	u8 flags;

 	s8 ag_cnt;
 #define AG_CNT_START 13
···
 	SENSOR_SOI768,
 	SENSOR_SP80708,
 };
+
+/* device flags */
+#define PDN_INV	1		/* inverse pin S_PWR_DN / sn_xxx tables */
+
+/* sn9c1xx definitions */
+/* register 0x01 */
+#define S_PWR_DN	0x01	/* sensor power down */
+#define S_PDN_INV	0x02	/* inverse pin S_PWR_DN */
+#define V_TX_EN		0x04	/* video transfer enable */
+#define LED		0x08	/* output to pin LED */
+#define SCL_SEL_OD	0x20	/* open-drain mode */
+#define SYS_SEL_48M	0x40	/* system clock 0: 24MHz, 1: 48MHz */
+/* register 0x17 */
+#define MCK_SIZE_MASK	0x1f	/* sensor master clock */
+#define SEN_CLK_EN	0x20	/* enable sensor clock */
+#define DEF_EN		0x80	/* defect pixel by 0: soft, 1: hard */

 /* V4L2 controls supported by the driver */
 static void setbrightness(struct gspca_dev *gspca_dev);
···
 	}
 }

-static void bridge_init(struct gspca_dev *gspca_dev,
-			const u8 *sn9c1xx)
-{
-	struct sd *sd = (struct sd *) gspca_dev;
-	u8 reg0102[2];
-	const u8 *reg9a;
-	static const u8 reg9a_def[] =
-		{0x00, 0x40, 0x20, 0x00, 0x00, 0x00};
-	static const u8 reg9a_spec[] =
-		{0x00, 0x40, 0x38, 0x30, 0x00, 0x20};
-	static const u8 regd4[] = {0x60, 0x00, 0x00};
-
-	/* sensor clock already enabled in sd_init */
-	/* reg_w1(gspca_dev, 0xf1, 0x00); */
-	reg_w1(gspca_dev, 0x01, sn9c1xx[1]);
-
-	/* configure gpio */
-	reg0102[0] = sn9c1xx[1];
-	reg0102[1] = sn9c1xx[2];
-	if (gspca_dev->audio)
-		reg0102[1] |= 0x04;	/* keep the audio connection */
-	reg_w(gspca_dev, 0x01, reg0102, 2);
-	reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2);
-	reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5);
-	switch (sd->sensor) {
-	case SENSOR_GC0307:
-	case SENSOR_OV7660:
-	case SENSOR_PO1030:
-	case SENSOR_PO2030N:
-	case SENSOR_SOI768:
-	case SENSOR_SP80708:
-		reg9a = reg9a_spec;
-		break;
-	default:
-		reg9a = reg9a_def;
-		break;
-	}
-	reg_w(gspca_dev, 0x9a, reg9a, 6);
-
-	reg_w(gspca_dev, 0xd4, regd4, sizeof regd4);
-
-	reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f);
-
-	switch (sd->sensor) {
-	case SENSOR_ADCM1700:
-		reg_w1(gspca_dev, 0x01, 0x43);
-		reg_w1(gspca_dev, 0x17, 0x62);
-		reg_w1(gspca_dev, 0x01, 0x42);
-		reg_w1(gspca_dev, 0x01, 0x42);
-		break;
-	case SENSOR_GC0307:
-		msleep(50);
-		reg_w1(gspca_dev, 0x01, 0x61);
-		reg_w1(gspca_dev, 0x17, 0x22);
-		reg_w1(gspca_dev, 0x01, 0x60);
-		reg_w1(gspca_dev, 0x01, 0x40);
-		msleep(50);
-		break;
-	case SENSOR_MI0360B:
-		reg_w1(gspca_dev, 0x01, 0x61);
-		reg_w1(gspca_dev, 0x17, 0x60);
-		reg_w1(gspca_dev, 0x01, 0x60);
-		reg_w1(gspca_dev, 0x01, 0x40);
-		break;
-	case SENSOR_MT9V111:
-		reg_w1(gspca_dev, 0x01, 0x61);
-		reg_w1(gspca_dev, 0x17, 0x61);
-		reg_w1(gspca_dev, 0x01, 0x60);
-		reg_w1(gspca_dev, 0x01, 0x40);
-		break;
-	case SENSOR_OM6802:
-		msleep(10);
-		reg_w1(gspca_dev, 0x02, 0x73);
-		reg_w1(gspca_dev, 0x17, 0x60);
-		reg_w1(gspca_dev, 0x01, 0x22);
-		msleep(100);
-		reg_w1(gspca_dev, 0x01, 0x62);
-		reg_w1(gspca_dev, 0x17, 0x64);
-		reg_w1(gspca_dev, 0x17, 0x64);
-		reg_w1(gspca_dev, 0x01, 0x42);
-		msleep(10);
-		reg_w1(gspca_dev, 0x01, 0x42);
-		i2c_w8(gspca_dev, om6802_init0[0]);
-		i2c_w8(gspca_dev, om6802_init0[1]);
-		msleep(15);
-		reg_w1(gspca_dev, 0x02, 0x71);
-		msleep(150);
-		break;
-	case SENSOR_OV7630:
-		reg_w1(gspca_dev, 0x01, 0x61);
-		reg_w1(gspca_dev, 0x17, 0xe2);
-		reg_w1(gspca_dev, 0x01, 0x60);
-		reg_w1(gspca_dev, 0x01, 0x40);
-		break;
-	case SENSOR_OV7648:
-		reg_w1(gspca_dev, 0x01, 0x63);
-		reg_w1(gspca_dev, 0x17, 0x20);
-		reg_w1(gspca_dev, 0x01, 0x62);
-		reg_w1(gspca_dev, 0x01, 0x42);
-		break;
-	case SENSOR_PO1030:
-	case SENSOR_SOI768:
-		reg_w1(gspca_dev, 0x01, 0x61);
-		reg_w1(gspca_dev, 0x17, 0x20);
-		reg_w1(gspca_dev, 0x01, 0x60);
-		reg_w1(gspca_dev, 0x01, 0x40);
-		break;
-	case SENSOR_PO2030N:
-	case SENSOR_OV7660:
-		reg_w1(gspca_dev, 0x01, 0x63);
-		reg_w1(gspca_dev, 0x17, 0x20);
-		reg_w1(gspca_dev, 0x01, 0x62);
-		reg_w1(gspca_dev, 0x01, 0x42);
-		break;
-	case SENSOR_SP80708:
-		reg_w1(gspca_dev, 0x01, 0x63);
-		reg_w1(gspca_dev, 0x17, 0x20);
-		reg_w1(gspca_dev, 0x01, 0x62);
-		reg_w1(gspca_dev, 0x01, 0x42);
-		msleep(100);
-		reg_w1(gspca_dev, 0x02, 0x62);
-		break;
-	default:
-/*	case SENSOR_HV7131R: */
-/*	case SENSOR_MI0360: */
-/*	case SENSOR_MO4000: */
-		reg_w1(gspca_dev, 0x01, 0x43);
-		reg_w1(gspca_dev, 0x17, 0x61);
-		reg_w1(gspca_dev, 0x01, 0x42);
-		if (sd->sensor == SENSOR_HV7131R)
-			hv7131r_probe(gspca_dev);
-		break;
-	}
-}
-
 /* this function is called at probe time */
 static int sd_config(struct gspca_dev *gspca_dev,
 			const struct usb_device_id *id)
···
 	struct cam *cam;

 	sd->bridge = id->driver_info >> 16;
-	sd->sensor = id->driver_info;
+	sd->sensor = id->driver_info >> 8;
+	sd->flags = id->driver_info;

 	cam = &gspca_dev->cam;
 	if (sd->sensor == SENSOR_ADCM1700) {
···
 	/* setup a selector by bridge */
 	reg_w1(gspca_dev, 0xf1, 0x01);
 	reg_r(gspca_dev, 0x00, 1);
-	reg_w1(gspca_dev, 0xf1, gspca_dev->usb_buf[0]);
+	reg_w1(gspca_dev, 0xf1, 0x00);
 	reg_r(gspca_dev, 0x00, 1);		/* get sonix chip id */
 	regF1 = gspca_dev->usb_buf[0];
 	if (gspca_dev->usb_err < 0)
···
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 	int i;
-	u8 reg1, reg17;
+	u8 reg01, reg17;
+	u8 reg0102[2];
 	const u8 *sn9c1xx;
 	const u8 (*init)[8];
+	const u8 *reg9a;
 	int mode;
+	static const u8 reg9a_def[] =
+		{0x00, 0x40, 0x20, 0x00, 0x00, 0x00};
+	static const u8 reg9a_spec[] =
+		{0x00, 0x40, 0x38, 0x30, 0x00, 0x20};
+	static const u8 regd4[] = {0x60, 0x00, 0x00};
 	static const u8 C0[] = { 0x2d, 0x2d, 0x3a, 0x05, 0x04, 0x3f };
 	static const u8 CA[] = { 0x28, 0xd8, 0x14, 0xec };
 	static const u8 CA_adcm1700[] =
···

 	/* initialize the bridge */
 	sn9c1xx = sn_tb[sd->sensor];
-	bridge_init(gspca_dev, sn9c1xx);
+
+	/* sensor clock already enabled in sd_init */
+	/* reg_w1(gspca_dev, 0xf1, 0x00); */
+	reg01 = sn9c1xx[1];
+	if (sd->flags & PDN_INV)
+		reg01 ^= S_PDN_INV;		/* power down inverted */
+	reg_w1(gspca_dev, 0x01, reg01);
+
+	/* configure gpio */
+	reg0102[0] = reg01;
+	reg0102[1] = sn9c1xx[2];
+	if (gspca_dev->audio)
+		reg0102[1] |= 0x04;	/* keep the audio connection */
+	reg_w(gspca_dev, 0x01, reg0102, 2);
+	reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2);
+	reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5);
+	switch (sd->sensor) {
+	case SENSOR_GC0307:
+	case SENSOR_OV7660:
+	case SENSOR_PO1030:
+	case SENSOR_PO2030N:
+	case SENSOR_SOI768:
+	case SENSOR_SP80708:
+		reg9a = reg9a_spec;
+		break;
+	default:
+		reg9a = reg9a_def;
+		break;
+	}
+	reg_w(gspca_dev, 0x9a, reg9a, 6);
+
+	reg_w(gspca_dev, 0xd4, regd4, sizeof regd4);
+
+	reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f);
+
+	reg17 = sn9c1xx[0x17];
+	switch (sd->sensor) {
+	case SENSOR_GC0307:
+		msleep(50);		/*fixme: is it useful? */
+		break;
+	case SENSOR_OM6802:
+		msleep(10);
+		reg_w1(gspca_dev, 0x02, 0x73);
+		reg17 |= SEN_CLK_EN;
+		reg_w1(gspca_dev, 0x17, reg17);
+		reg_w1(gspca_dev, 0x01, 0x22);
+		msleep(100);
+		reg01 = SCL_SEL_OD | S_PDN_INV;
+		reg17 &= MCK_SIZE_MASK;
+		reg17 |= 0x04;		/* clock / 4 */
+		break;
+	}
+	reg01 |= SYS_SEL_48M;
+	reg_w1(gspca_dev, 0x01, reg01);
+	reg17 |= SEN_CLK_EN;
+	reg_w1(gspca_dev, 0x17, reg17);
+	reg01 &= ~S_PWR_DN;		/* sensor power on */
+	reg_w1(gspca_dev, 0x01, reg01);
+	reg01 &= ~SYS_SEL_48M;
+	reg_w1(gspca_dev, 0x01, reg01);
+
+	switch (sd->sensor) {
+	case SENSOR_HV7131R:
+		hv7131r_probe(gspca_dev);	/*fixme: is it useful? */
+		break;
+	case SENSOR_OM6802:
+		msleep(10);
+		reg_w1(gspca_dev, 0x01, reg01);
+		i2c_w8(gspca_dev, om6802_init0[0]);
+		i2c_w8(gspca_dev, om6802_init0[1]);
+		msleep(15);
+		reg_w1(gspca_dev, 0x02, 0x71);
+		msleep(150);
+		break;
+	case SENSOR_SP80708:
+		msleep(100);
+		reg_w1(gspca_dev, 0x02, 0x62);
+		break;
+	}

 	/* initialize the sensor */
 	i2c_w_seq(gspca_dev, sensor_init[sd->sensor]);
···
 	}
 	reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]);
 	switch (sd->sensor) {
-	case SENSOR_GC0307:
-		reg17 = 0xa2;
-		break;
-	case SENSOR_MT9V111:
-	case SENSOR_MI0360B:
-		reg17 = 0xe0;
-		break;
-	case SENSOR_ADCM1700:
-	case SENSOR_OV7630:
-		reg17 = 0xe2;
-		break;
-	case SENSOR_OV7648:
-		reg17 = 0x20;
-		break;
-	case SENSOR_OV7660:
-	case SENSOR_SOI768:
-		reg17 = 0xa0;
-		break;
-	case SENSOR_PO1030:
-	case SENSOR_PO2030N:
-		reg17 = 0xa0;
+	case SENSOR_OM6802:
+/*	case SENSOR_OV7648:		* fixme: sometimes */
 		break;
 	default:
-		reg17 = 0x60;
+		reg17 |= DEF_EN;
 		break;
 	}
 	reg_w1(gspca_dev, 0x17, reg17);
···

 	init = NULL;
 	mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
-	if (mode)
-		reg1 = 0x46;	/* 320x240: clk 48Mhz, video trf enable */
-	else
-		reg1 = 0x06;	/* 640x480: clk 24Mhz, video trf enable */
-	reg17 = 0x61;		/* 0x:20: enable sensor clock */
+	reg01 |= SYS_SEL_48M | V_TX_EN;
+	reg17 &= ~MCK_SIZE_MASK;
+	reg17 |= 0x02;		/* clock / 2 */
 	switch (sd->sensor) {
 	case SENSOR_ADCM1700:
 		init = adcm1700_sensor_param1;
-		reg1 = 0x46;
-		reg17 = 0xe2;
 		break;
 	case SENSOR_GC0307:
 		init = gc0307_sensor_param1;
-		reg17 = 0xa2;
-		reg1 = 0x44;
+		break;
+	case SENSOR_HV7131R:
+	case SENSOR_MI0360:
+		if (mode)
+			reg01 |= SYS_SEL_48M;	/* 320x240: clk 48Mhz */
+		else
+			reg01 &= ~SYS_SEL_48M;	/* 640x480: clk 24Mhz */
+		reg17 &= ~MCK_SIZE_MASK;
+		reg17 |= 0x01;		/* clock / 1 */
 		break;
 	case SENSOR_MI0360B:
 		init = mi0360b_sensor_param1;
-		reg1 &= ~0x02;		/* don't inverse pin S_PWR_DN */
-		reg17 = 0xe2;
 		break;
 	case SENSOR_MO4000:
-		if (mode) {
-/*			reg1 = 0x46;	 * 320 clk 48Mhz 60fp/s */
-			reg1 = 0x06;	/* clk 24Mz */
-		} else {
-			reg17 = 0x22;	/* 640 MCKSIZE */
-/*			reg1 = 0x06;	 * 640 clk 24Mz (done) */
+		if (mode) {		/* if 320x240 */
+			reg01 &= ~SYS_SEL_48M;	/* clk 24Mz */
+			reg17 &= ~MCK_SIZE_MASK;
+			reg17 |= 0x01;	/* clock / 1 */
 		}
 		break;
 	case SENSOR_MT9V111:
 		init = mt9v111_sensor_param1;
-		if (mode) {
-			reg1 = 0x04;	/* 320 clk 48Mhz */
-		} else {
-/*			reg1 = 0x06;	 * 640 clk 24Mz (done) */
-			reg17 = 0xc2;
-		}
 		break;
 	case SENSOR_OM6802:
 		init = om6802_sensor_param1;
-		reg17 = 0x64;		/* 640 MCKSIZE */
+		if (!mode) {		/* if 640x480 */
+			reg17 &= ~MCK_SIZE_MASK;
+			reg17 |= 0x01;	/* clock / 4 */
+		}
 		break;
 	case SENSOR_OV7630:
 		init = ov7630_sensor_param1;
-		reg17 = 0xe2;
-		reg1 = 0x44;
 		break;
 	case SENSOR_OV7648:
 		init = ov7648_sensor_param1;
-		reg17 = 0x21;
-/*		reg1 = 0x42;		 * 42 - 46? */
+		reg17 &= ~MCK_SIZE_MASK;
+		reg17 |= 0x01;		/* clock / 1 */
 		break;
 	case SENSOR_OV7660:
 		init = ov7660_sensor_param1;
-		if (sd->bridge == BRIDGE_SN9C120) {
-			if (mode) {		/* 320x240 - 160x120 */
-				reg17 = 0xa2;
-				reg1 = 0x44;	/* 48 Mhz, video trf eneble */
-			}
-		} else {
-			reg17 = 0x22;
-			reg1 = 0x06;	/* 24 Mhz, video trf eneble
-					 * inverse power down */
-		}
 		break;
 	case SENSOR_PO1030:
 		init = po1030_sensor_param1;
-		reg17 = 0xa2;
-		reg1 = 0x44;
 		break;
 	case SENSOR_PO2030N:
 		init = po2030n_sensor_param1;
-		reg1 = 0x46;
-		reg17 = 0xa2;
 		break;
 	case SENSOR_SOI768:
 		init = soi768_sensor_param1;
-		reg1 = 0x44;
-		reg17 = 0xa2;
 		break;
 	case SENSOR_SP80708:
 		init = sp80708_sensor_param1;
-		if (mode) {
-/*??			reg1 = 0x04;	 * 320 clk 48Mhz */
-		} else {
-			reg1 = 0x46;	/* 640 clk 48Mz */
-			reg17 = 0xa2;
-		}
 		break;
 	}
···
 	setjpegqual(gspca_dev);

 	reg_w1(gspca_dev, 0x17, reg17);
-	reg_w1(gspca_dev, 0x01, reg1);
+	reg_w1(gspca_dev, 0x01, reg01);
+	sd->reg01 = reg01;
+	sd->reg17 = reg17;

 	sethvflip(gspca_dev);
 	setbrightness(gspca_dev);
···
 		{ 0xa1, 0x21, 0x76, 0x20, 0x00, 0x00, 0x00, 0x10 };
 	static const u8 stopsoi768[] =
 		{ 0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10 };
-	u8 data;
-	const u8 *sn9c1xx;
+	u8 reg01;
+	u8 reg17;

-	data = 0x0b;
+	reg01 = sd->reg01;
+	reg17 = sd->reg17 & ~SEN_CLK_EN;
 	switch (sd->sensor) {
+	case SENSOR_ADCM1700:
 	case SENSOR_GC0307:
-		data = 0x29;
+	case SENSOR_PO2030N:
+	case SENSOR_SP80708:
+		reg01 |= LED;
+		reg_w1(gspca_dev, 0x01, reg01);
+		reg01 &= ~(LED | V_TX_EN);
+		reg_w1(gspca_dev, 0x01, reg01);
+/*		reg_w1(gspca_dev, 0x02, 0x??);	 * LED off ? */
 		break;
 	case SENSOR_HV7131R:
+		reg01 &= ~V_TX_EN;
+		reg_w1(gspca_dev, 0x01, reg01);
 		i2c_w8(gspca_dev, stophv7131);
-		data = 0x2b;
 		break;
 	case SENSOR_MI0360:
 	case SENSOR_MI0360B:
+		reg01 &= ~V_TX_EN;
+		reg_w1(gspca_dev, 0x01, reg01);
+/*		reg_w1(gspca_dev, 0x02, 0x40);	 * LED off ? */
 		i2c_w8(gspca_dev, stopmi0360);
-		data = 0x29;
 		break;
-	case SENSOR_OV7648:
-		i2c_w8(gspca_dev, stopov7648);
-		/* fall thru */
 	case SENSOR_MT9V111:
-	case SENSOR_OV7630:
+	case SENSOR_OM6802:
 	case SENSOR_PO1030:
-		data = 0x29;
+		reg01 &= ~V_TX_EN;
+		reg_w1(gspca_dev, 0x01, reg01);
+		break;
+	case SENSOR_OV7630:
+	case SENSOR_OV7648:
+		reg01 &= ~V_TX_EN;
+		reg_w1(gspca_dev, 0x01, reg01);
+		i2c_w8(gspca_dev, stopov7648);
+		break;
+	case SENSOR_OV7660:
+		reg01 &= ~V_TX_EN;
+		reg_w1(gspca_dev, 0x01, reg01);
 		break;
 	case SENSOR_SOI768:
 		i2c_w8(gspca_dev, stopsoi768);
-		data = 0x29;
 		break;
 	}
-	sn9c1xx = sn_tb[sd->sensor];
-	reg_w1(gspca_dev, 0x01, sn9c1xx[1]);
-	reg_w1(gspca_dev, 0x17, sn9c1xx[0x17]);
-	reg_w1(gspca_dev, 0x01, sn9c1xx[1]);
-	reg_w1(gspca_dev, 0x01, data);
+
+	reg01 |= SCL_SEL_OD;
+	reg_w1(gspca_dev, 0x01, reg01);
+	reg01 |= S_PWR_DN;		/* sensor power down */
+	reg_w1(gspca_dev, 0x01, reg01);
+	reg_w1(gspca_dev, 0x17, reg17);
+	reg01 &= ~SYS_SEL_48M;		/* clock 24MHz */
+	reg_w1(gspca_dev, 0x01, reg01);
+	reg01 |= LED;
+	reg_w1(gspca_dev, 0x01, reg01);
 	/* Don't disable sensor clock as that disables the button on the cam */
 	/* reg_w1(gspca_dev, 0xf1, 0x01); */
 }
···
 /* -- module initialisation -- */
 #define BS(bridge, sensor) \
 	.driver_info = (BRIDGE_ ## bridge << 16) \
-			| SENSOR_ ## sensor
+			| (SENSOR_ ## sensor << 8)
+#define BSF(bridge, sensor, flags) \
+	.driver_info = (BRIDGE_ ## bridge << 16) \
+			| (SENSOR_ ## sensor << 8) \
+			| (flags)
 static const __devinitdata struct usb_device_id device_table[] = {
 #if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
 	{USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
 	{USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
 #endif
-	{USB_DEVICE(0x045e, 0x00f5), BS(SN9C105, OV7660)},
-	{USB_DEVICE(0x045e, 0x00f7), BS(SN9C105, OV7660)},
+	{USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, PDN_INV)},
+	{USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, PDN_INV)},
 	{USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)},
 	{USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)},
 	{USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)},
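
The BS()/BSF() macros above pack the bridge id, the sensor id and the new per-device flags into the 32-bit driver_info word, and sd_config() recovers them with the shifts shown earlier in this diff. A minimal stand-alone sketch of that packing scheme, assuming arbitrary placeholder values; the main() harness is illustrative only and not part of the driver:

#include <stdio.h>

/* Packing used by the sonixj device table: bridge in bits 16..31,
 * sensor in bits 8..15, device flags (e.g. PDN_INV) in bits 0..7. */
#define PACK_BSF(bridge, sensor, flags) \
	(((unsigned long)(bridge) << 16) | ((sensor) << 8) | (flags))

int main(void)
{
	/* placeholder ids, not real BRIDGE_/SENSOR_ enum values */
	unsigned long driver_info = PACK_BSF(2, 7, 1 /* PDN_INV */);

	/* the same unpacking sd_config() performs into u8 fields */
	unsigned char bridge = driver_info >> 16;
	unsigned char sensor = driver_info >> 8;	/* truncates to low byte */
	unsigned char flags  = driver_info;

	printf("bridge=%u sensor=%u flags=%u\n", bridge, sensor, flags);
	return 0;
}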
-2 drivers/media/video/mx2_camera.c
···
 	if (common_flags & SOCAM_PCLK_SAMPLE_RISING)
 		csicr1 |= CSICR1_REDGE;
-	if (common_flags & SOCAM_PCLK_SAMPLE_FALLING)
-		csicr1 |= CSICR1_INV_PCLK;
 	if (common_flags & SOCAM_VSYNC_ACTIVE_HIGH)
 		csicr1 |= CSICR1_SOF_POL;
 	if (common_flags & SOCAM_HSYNC_ACTIVE_HIGH)
+48 -3 drivers/media/video/s5p-fimc/fimc-capture.c
···
 	INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q);
 	fimc->vid_cap.active_buf_cnt = 0;
 	fimc->vid_cap.frame_count = 0;
+	fimc->vid_cap.buf_index = fimc_hw_get_frame_index(fimc);

 	set_bit(ST_CAPT_PEND, &fimc->state);
 	ret = videobuf_streamon(&fimc->vid_cap.vbq);
···
 	return ret;
 }

+static int fimc_cap_cropcap(struct file *file, void *fh,
+			    struct v4l2_cropcap *cr)
+{
+	struct fimc_frame *f;
+	struct fimc_ctx *ctx = fh;
+	struct fimc_dev *fimc = ctx->fimc_dev;
+
+	if (cr->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return -ERESTARTSYS;
+
+	f = &ctx->s_frame;
+	cr->bounds.left = 0;
+	cr->bounds.top = 0;
+	cr->bounds.width = f->o_width;
+	cr->bounds.height = f->o_height;
+	cr->defrect = cr->bounds;
+
+	mutex_unlock(&fimc->lock);
+	return 0;
+}
+
+static int fimc_cap_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+	struct fimc_frame *f;
+	struct fimc_ctx *ctx = file->private_data;
+	struct fimc_dev *fimc = ctx->fimc_dev;
+
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return -ERESTARTSYS;
+
+	f = &ctx->s_frame;
+	cr->c.left = f->offs_h;
+	cr->c.top = f->offs_v;
+	cr->c.width = f->width;
+	cr->c.height = f->height;
+
+	mutex_unlock(&fimc->lock);
+	return 0;
+}
+
 static int fimc_cap_s_crop(struct file *file, void *fh,
 			   struct v4l2_crop *cr)
 {
···
 	.vidioc_g_ctrl		= fimc_vidioc_g_ctrl,
 	.vidioc_s_ctrl		= fimc_cap_s_ctrl,

-	.vidioc_g_crop		= fimc_vidioc_g_crop,
+	.vidioc_g_crop		= fimc_cap_g_crop,
 	.vidioc_s_crop		= fimc_cap_s_crop,
-	.vidioc_cropcap		= fimc_vidioc_cropcap,
+	.vidioc_cropcap		= fimc_cap_cropcap,

 	.vidioc_enum_input	= fimc_cap_enum_input,
 	.vidioc_s_input		= fimc_cap_s_input,
···
 	videobuf_queue_dma_contig_init(&vid_cap->vbq, &fimc_qops,
 		vid_cap->v4l2_dev.dev, &fimc->irqlock,
 		V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
-		sizeof(struct fimc_vid_buffer), (void *)ctx);
+		sizeof(struct fimc_vid_buffer), (void *)ctx, NULL);

 	ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
 	if (ret) {
+33 -21 drivers/media/video/s5p-fimc/fimc-core.c
···
 		.planes_cnt = 1,
 		.flags = FMT_FLAGS_M2M,
 	}, {
-		.name	= "XRGB-8-8-8-8, 24 bpp",
-		.fourcc	= V4L2_PIX_FMT_RGB24,
+		.name	= "XRGB-8-8-8-8, 32 bpp",
+		.fourcc	= V4L2_PIX_FMT_RGB32,
 		.depth	= 32,
 		.color	= S5P_FIMC_RGB888,
 		.buff_cnt = 1,
···
 {
 	struct fimc_ctx *ctx = priv;
 	struct v4l2_queryctrl *c;
+	int ret = -EINVAL;

 	c = get_ctrl(qc->id);
 	if (c) {
···
 		return 0;
 	}

-	if (ctx->state & FIMC_CTX_CAP)
-		return v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd,
+	if (ctx->state & FIMC_CTX_CAP) {
+		if (mutex_lock_interruptible(&ctx->fimc_dev->lock))
+			return -ERESTARTSYS;
+		ret = v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd,
 					core, queryctrl, qc);
-	return -EINVAL;
+		mutex_unlock(&ctx->fimc_dev->lock);
+	}
+	return ret;
 }

 int fimc_vidioc_g_ctrl(struct file *file, void *priv,
···
 	return 0;
 }

-int fimc_vidioc_cropcap(struct file *file, void *fh,
+static int fimc_m2m_cropcap(struct file *file, void *fh,
 			struct v4l2_cropcap *cr)
 {
 	struct fimc_frame *frame;
···
 	return 0;
 }

-int fimc_vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
 {
 	struct fimc_frame *frame;
 	struct fimc_ctx *ctx = file->private_data;
···
 	struct fimc_frame *f;
 	u32 min_size, halign;

-	f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
-		&ctx->s_frame : &ctx->d_frame;
-
 	if (cr->c.top < 0 || cr->c.left < 0) {
 		v4l2_err(&fimc->m2m.v4l2_dev,
 			"doesn't support negative values for top & left\n");
 		return -EINVAL;
 	}

-	f = ctx_get_frame(ctx, cr->type);
-	if (IS_ERR(f))
-		return PTR_ERR(f);
+	if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		f = (ctx->state & FIMC_CTX_CAP) ? &ctx->s_frame : &ctx->d_frame;
+	else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+		 ctx->state & FIMC_CTX_M2M)
+		f = &ctx->s_frame;
+	else
+		return -EINVAL;

-	min_size = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
-		? fimc->variant->min_inp_pixsize
-		: fimc->variant->min_out_pixsize;
+	min_size = (f == &ctx->s_frame) ?
+		fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;

 	if (ctx->state & FIMC_CTX_M2M) {
 		if (fimc->id == 1 && fimc->variant->pix_hoff)
···
 	f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
 		&ctx->s_frame : &ctx->d_frame;

+	if (mutex_lock_interruptible(&fimc->lock))
+		return -ERESTARTSYS;
+
 	spin_lock_irqsave(&ctx->slock, flags);
 	if (~ctx->state & (FIMC_SRC_FMT | FIMC_DST_FMT)) {
 		/* Check to see if scaling ratio is within supported range */
···
 		else
 			ret = fimc_check_scaler_ratio(&cr->c, &ctx->s_frame);
 		if (ret) {
-			spin_unlock_irqrestore(&ctx->slock, flags);
 			v4l2_err(&fimc->m2m.v4l2_dev, "Out of scaler range");
-			return -EINVAL;
+			ret = -EINVAL;
+			goto scr_unlock;
 		}
 	}
 	ctx->state |= FIMC_PARAMS;
···
 	f->width  = cr->c.width;
 	f->height = cr->c.height;

+scr_unlock:
 	spin_unlock_irqrestore(&ctx->slock, flags);
+	mutex_unlock(&fimc->lock);
 	return 0;
 }

···
 	.vidioc_g_ctrl			= fimc_vidioc_g_ctrl,
 	.vidioc_s_ctrl			= fimc_m2m_s_ctrl,

-	.vidioc_g_crop			= fimc_vidioc_g_crop,
+	.vidioc_g_crop			= fimc_m2m_g_crop,
 	.vidioc_s_crop			= fimc_m2m_s_crop,
-	.vidioc_cropcap			= fimc_vidioc_cropcap
+	.vidioc_cropcap			= fimc_m2m_cropcap

 };

···
 	.open		= fimc_m2m_open,
 	.release	= fimc_m2m_release,
 	.poll		= fimc_m2m_poll,
-	.ioctl		= video_ioctl2,
+	.unlocked_ioctl	= video_ioctl2,
 	.mmap		= fimc_m2m_mmap,
 };

···
 	.pix_hoff	 = 1,
 	.has_inp_rot	 = 1,
 	.has_out_rot	 = 1,
+	.has_cistatus2	 = 1,
 	.min_inp_pixsize = 16,
 	.min_out_pixsize = 16,
 	.hor_offs_align	 = 1,
···

 static struct samsung_fimc_variant fimc2_variant_s5pv310 = {
 	.pix_hoff	 = 1,
+	.has_cistatus2	 = 1,
 	.min_inp_pixsize = 16,
 	.min_out_pixsize = 16,
 	.hor_offs_align	 = 1,
+16 -8 drivers/media/video/s5p-fimc/fimc-core.h
···

 /*#define DEBUG*/

+#include <linux/sched.h>
 #include <linux/types.h>
+#include <linux/videodev2.h>
 #include <media/videobuf-core.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-mem2mem.h>
 #include <media/v4l2-mediabus.h>
 #include <media/s3c_fimc.h>
-#include <linux/videodev2.h>
+
 #include "regs-fimc.h"

 #define err(fmt, args...) \
···
 * @pix_hoff: indicate whether horizontal offset is in pixels or in bytes
 * @has_inp_rot: set if has input rotator
 * @has_out_rot: set if has output rotator
+ * @has_cistatus2: 1 if CISTATUS2 register is present in this IP revision
 * @pix_limit: pixel size constraints for the scaler
 * @min_inp_pixsize: minimum input pixel size
 * @min_out_pixsize: minimum output pixel size
···
 	unsigned int pix_hoff:1;
 	unsigned int has_inp_rot:1;
 	unsigned int has_out_rot:1;
+	unsigned int has_cistatus2:1;
 	struct fimc_pix_limit *pix_limit;
 	u16 min_inp_pixsize;
 	u16 min_out_pixsize;
···
 	return frame;
 }

+/* Return an index to the buffer actually being written. */
 static inline u32 fimc_hw_get_frame_index(struct fimc_dev *dev)
 {
-	u32 reg = readl(dev->regs + S5P_CISTATUS);
-	return (reg & S5P_CISTATUS_FRAMECNT_MASK) >>
-		S5P_CISTATUS_FRAMECNT_SHIFT;
+	u32 reg;
+
+	if (dev->variant->has_cistatus2) {
+		reg = readl(dev->regs + S5P_CISTATUS2) & 0x3F;
+		return reg > 0 ? --reg : reg;
+	} else {
+		reg = readl(dev->regs + S5P_CISTATUS);
+		return (reg & S5P_CISTATUS_FRAMECNT_MASK) >>
+			S5P_CISTATUS_FRAMECNT_SHIFT;
+	}
 }

 /* -----------------------------------------------------*/
···
 			struct v4l2_format *f);
 int fimc_vidioc_try_fmt(struct file *file, void *priv,
 			struct v4l2_format *f);
-int fimc_vidioc_g_crop(struct file *file, void *fh,
-			struct v4l2_crop *cr);
-int fimc_vidioc_cropcap(struct file *file, void *fh,
-			struct v4l2_cropcap *cr);
 int fimc_vidioc_queryctrl(struct file *file, void *priv,
 			struct v4l2_queryctrl *qc);
 int fimc_vidioc_g_ctrl(struct file *file, void *priv,
+3 drivers/media/video/s5p-fimc/regs-fimc.h
···
 #define S5P_CISTATUS_VVALID_A		(1 << 15)
 #define S5P_CISTATUS_VVALID_B		(1 << 14)

+/* Indexes to the last and the currently processed buffer. */
+#define S5P_CISTATUS2			0x68
+
 /* Image capture control */
 #define S5P_CIIMGCPT			0xc0
 #define S5P_CIIMGCPT_IMGCPTEN		(1 << 31)
+1 -1 drivers/media/video/sh_mobile_ceu_camera.c
···
 	 * we complete the completion.
 	 */

-	if (!csi2->driver || !csi2->driver->owner) {
+	if (!csi2->driver) {
 		complete(&wait.completion);
 		/* Either too late, or probing failed */
 		bus_unregister_notifier(&platform_bus_type, &wait.notifier);
+2 -2 drivers/media/video/soc_camera.c
···
 		ret = soc_camera_set_fmt(icd, &f);
 		if (ret < 0)
 			goto esfmt;
+
+		ici->ops->init_videobuf(&icd->vb_vidq, icd);
 	}

 	file->private_data = icd;
 	dev_dbg(&icd->dev, "camera device open\n");
-
-	ici->ops->init_videobuf(&icd->vb_vidq, icd);

 	mutex_unlock(&icd->video_lock);
+1 -1 drivers/mfd/ab8500-core.c
+6 -2 drivers/mfd/wm831x-core.c
···
 		dev_err(wm831x->dev, "Failed to read parent ID: %d\n", ret);
 		goto err;
 	}
-	if (ret != 0x6204) {
+	switch (ret) {
+	case 0x6204:
+	case 0x6246:
+		break;
+	default:
 		dev_err(wm831x->dev, "Device is not a WM831x: ID %x\n", ret);
 		ret = -EINVAL;
 		goto err;
···
 	case WM8325:
 		ret = mfd_add_devices(wm831x->dev, -1,
 				      wm8320_devs, ARRAY_SIZE(wm8320_devs),
-				      NULL, 0);
+				      NULL, wm831x->irq_base);
 		break;

 	default:
+1 drivers/mmc/core/core.c
+9 -4 drivers/mmc/host/at91_mci.c
···
 #include <linux/highmem.h>

 #include <linux/mmc/host.h>
+#include <linux/mmc/sdio.h>

 #include <asm/io.h>
 #include <asm/irq.h>
···
 		else if (data->flags & MMC_DATA_WRITE)
 			cmdr |= AT91_MCI_TRCMD_START;

-		if (data->flags & MMC_DATA_STREAM)
-			cmdr |= AT91_MCI_TRTYP_STREAM;
-		if (data->blocks > 1)
-			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
+		if (cmd->opcode == SD_IO_RW_EXTENDED) {
+			cmdr |= AT91_MCI_TRTYP_SDIO_BLOCK;
+		} else {
+			if (data->flags & MMC_DATA_STREAM)
+				cmdr |= AT91_MCI_TRTYP_STREAM;
+			if (data->blocks > 1)
+				cmdr |= AT91_MCI_TRTYP_MULTIPLE;
+		}
 	}
 	else {
 		block_length = 0;
+12 -6 drivers/mmc/host/atmel-mci.c
···
 #include <linux/stat.h>

 #include <linux/mmc/host.h>
+#include <linux/mmc/sdio.h>

 #include <mach/atmel-mci.h>
 #include <linux/atmel-mci.h>
···
 	data = cmd->data;
 	if (data) {
 		cmdr |= MCI_CMDR_START_XFER;
-		if (data->flags & MMC_DATA_STREAM)
-			cmdr |= MCI_CMDR_STREAM;
-		else if (data->blocks > 1)
-			cmdr |= MCI_CMDR_MULTI_BLOCK;
-		else
-			cmdr |= MCI_CMDR_BLOCK;
+
+		if (cmd->opcode == SD_IO_RW_EXTENDED) {
+			cmdr |= MCI_CMDR_SDIO_BLOCK;
+		} else {
+			if (data->flags & MMC_DATA_STREAM)
+				cmdr |= MCI_CMDR_STREAM;
+			else if (data->blocks > 1)
+				cmdr |= MCI_CMDR_MULTI_BLOCK;
+			else
+				cmdr |= MCI_CMDR_BLOCK;
+		}

 		if (data->flags & MMC_DATA_READ)
 			cmdr |= MCI_CMDR_TRDIR_READ;
+15 -24 drivers/net/atl1c/atl1c_main.c
···

 	adapter->wol = 0;
+	device_set_wakeup_enable(&pdev->dev, false);
 	adapter->link_speed = SPEED_0;
 	adapter->link_duplex = FULL_DUPLEX;
 	adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
···
 	return 0;
 }

-static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
+static int atl1c_suspend(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 	struct atl1c_hw *hw = &adapter->hw;
···
 	u32 wol_ctrl_data = 0;
 	u16 mii_intr_status_data = 0;
 	u32 wufc = adapter->wol;
-	int retval = 0;

 	atl1c_disable_l0s_l1(hw);
 	if (netif_running(netdev)) {
···
 		atl1c_down(adapter);
 	}
 	netif_device_detach(netdev);
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;

 	if (wufc)
 		if (atl1c_phy_power_saving(hw) != 0)
···
 		AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
 		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);

-		/* pcie patch */
-		device_set_wakeup_enable(&pdev->dev, 1);
-
 		AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
 			GPHY_CTRL_EXT_RESET);
-		pci_prepare_to_sleep(pdev);
 	} else {
 		AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
 		master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
···
 		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
 		AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
 		hw->phy_configured = false; /* re-init PHY when resume */
-		pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
 	}
-
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));

 	return 0;
 }

-static int atl1c_resume(struct pci_dev *pdev)
+static int atl1c_resume(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
-
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	pci_enable_wake(pdev, PCI_D3hot, 0);
-	pci_enable_wake(pdev, PCI_D3cold, 0);

 	AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
 	atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
···

 static void atl1c_shutdown(struct pci_dev *pdev)
 {
-	atl1c_suspend(pdev, PMSG_SUSPEND);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+	atl1c_suspend(&pdev->dev);
+	pci_wake_from_d3(pdev, adapter->wol);
+	pci_set_power_state(pdev, PCI_D3hot);
 }

 static const struct net_device_ops atl1c_netdev_ops = {
···
 	.resume = atl1c_io_resume,
 };

+static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume);
+
 static struct pci_driver atl1c_driver = {
 	.name     = atl1c_driver_name,
 	.id_table = atl1c_pci_tbl,
 	.probe    = atl1c_probe,
 	.remove   = __devexit_p(atl1c_remove),
-	/* Power Managment Hooks */
-	.suspend  = atl1c_suspend,
-	.resume   = atl1c_resume,
 	.shutdown = atl1c_shutdown,
-	.err_handler = &atl1c_err_handler
+	.err_handler = &atl1c_err_handler,
+	.driver.pm = &atl1c_pm_ops,
 };

 /*
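
The atl1c change above is the standard migration from the legacy pci_driver .suspend/.resume hooks to dev_pm_ops: the PCI core now saves/restores PCI state and sets the power state itself, so the callbacks take a struct device and drop that boilerplate. A rough sketch of the pattern, assuming hypothetical foo_* names rather than the actual atl1c code:

#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* device-specific quiesce via pci_get_drvdata(pdev); no
	 * pci_save_state()/pci_set_power_state() needed here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* device-specific re-init; PCI state already restored */
	return 0;
}

/* expands to a const struct dev_pm_ops with the system-sleep
 * slots (suspend/resume, freeze/thaw, poweroff/restore) filled */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name      = "foo",
	/* probe/remove/id_table elided in this sketch */
	.driver.pm = &foo_pm_ops,	/* replaces .suspend/.resume */
};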
+1 -1 drivers/net/benet/be.h
···
 	u8 __iomem *db;		/* Door Bell */
 	u8 __iomem *pcicfg;	/* PCI config space */

-	spinlock_t mbox_lock;	/* For serializing mbox cmds to BE card */
+	struct mutex mbox_lock;	/* For serializing mbox cmds to BE card */
 	struct be_dma_mem mbox_mem;
 	/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
 	 * is stored for freeing purpose */
+45 -30 drivers/net/benet/be_cmds.c
···
 	u8 *wrb;
 	int status;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = (u8 *)wrb_from_mbox(adapter);
 	*wrb++ = 0xFF;
···

 	status = be_mbox_notify_wait(adapter);

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);
 	return status;
 }

···
 	if (adapter->eeh_err)
 		return -EIO;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = (u8 *)wrb_from_mbox(adapter);
 	*wrb++ = 0xFF;
···

 	status = be_mbox_notify_wait(adapter);

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);
 	return status;
 }
 int be_cmd_eq_create(struct be_adapter *adapter,
···
 	struct be_dma_mem *q_mem = &eq->dma_mem;
 	int status;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
···
 		eq->created = true;
 	}

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);
 	return status;
 }

···
 	struct be_cmd_req_mac_query *req;
 	int status;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
···
 		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
 	}

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);
 	return status;
 }

···
 	void *ctxt;
 	int status;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
···
 		cq->created = true;
 	}

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);

 	return status;
 }
···
 	void *ctxt;
 	int status;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
···
 		mccq->id = le16_to_cpu(resp->id);
 		mccq->created = true;
 	}
-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);

 	return status;
 }
···
 	void *ctxt;
 	int status;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
···
 		txq->created = true;
 	}

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);

 	return status;
 }
···
 	struct be_dma_mem *q_mem = &rxq->dma_mem;
 	int status;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
···
 		*rss_id = resp->rss_id;
 	}

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);

 	return status;
 }
···
 	if (adapter->eeh_err)
 		return -EIO;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
···

 	status = be_mbox_notify_wait(adapter);

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);

 	return status;
 }
···
 	struct be_cmd_req_if_create *req;
 	int status;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
···
 		*pmac_id = le32_to_cpu(resp->pmac_id);
 	}

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);
 	return status;
 }

···
 	if (adapter->eeh_err)
 		return -EIO;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
···

 	status = be_mbox_notify_wait(adapter);

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);

 	return status;
 }
···
 	struct be_cmd_req_get_fw_version *req;
 	int status;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
···
 		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
 	}

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);
 	return status;
 }

···
 	struct be_cmd_req_query_fw_cfg *req;
 	int status;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
···
 		*caps = le32_to_cpu(resp->function_caps);
 	}

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);
 	return status;
 }

···
 	struct be_cmd_req_hdr *req;
 	int status;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
···

 	status = be_mbox_notify_wait(adapter);

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);
 	return status;
 }

···
 	u32 myhash[10];
 	int status;

-	spin_lock(&adapter->mbox_lock);
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;

 	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
···

 	status = be_mbox_notify_wait(adapter);

-	spin_unlock(&adapter->mbox_lock);
+	mutex_unlock(&adapter->mbox_lock);
 	return status;
 }

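Every mailbox helper above follows the same conversion: the spinlock guarding the mailbox becomes a sleeping mutex taken interruptibly, so a task waiting for the (potentially slow) firmware mailbox can be interrupted by a signal instead of spinning. A schematic of the pattern, using a hypothetical foo_cmd() wrapper around the real mbox_lock/be_mbox_notify_wait() names from the driver:

#include <linux/mutex.h>

static int foo_cmd(struct be_adapter *adapter)
{
	int status;

	/* mutex_lock_interruptible() returns 0 on success and
	 * -EINTR if a signal arrived while sleeping */
	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;	/* error convention used by the driver */

	status = be_mbox_notify_wait(adapter);	/* may sleep */

	mutex_unlock(&adapter->mbox_lock);
	return status;
}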
+1 -1 drivers/net/benet/be_main.c
+6 -1 drivers/net/bonding/bond_ipv6.c
···
 	}

 	if (vlan_id) {
-		skb = vlan_put_tag(skb, vlan_id);
+		/* The Ethernet header is not present yet, so it is
+		 * too early to insert a VLAN tag.  Force use of an
+		 * out-of-line tag here and let dev_hard_start_xmit()
+		 * insert it if the slave hardware can't.
+		 */
+		skb = __vlan_hwaccel_put_tag(skb, vlan_id);
 		if (!skb) {
 			pr_err("failed to insert VLAN tag\n");
 			return;
+10 -32 drivers/net/bonding/bond_main.c
···
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
- *
- * When the bond gets an skb to transmit that is
- * already hardware accelerated VLAN tagged, and it
- * needs to relay this skb to a slave that is not
- * hw accel capable, the skb needs to be "unaccelerated",
- * i.e. strip the hwaccel tag and re-insert it as part
- * of the payload.
 */
 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 			struct net_device *slave_dev)
 {
-	unsigned short uninitialized_var(vlan_id);
-
-	/* Test vlan_list not vlgrp to catch and handle 802.1p tags */
-	if (!list_empty(&bond->vlan_list) &&
-	    !(slave_dev->features & NETIF_F_HW_VLAN_TX) &&
-	    vlan_get_tag(skb, &vlan_id) == 0) {
-		skb->dev = slave_dev;
-		skb = vlan_put_tag(skb, vlan_id);
-		if (!skb) {
-			/* vlan_put_tag() frees the skb in case of error,
-			 * so return success here so the calling functions
-			 * won't attempt to free is again.
-			 */
-			return 0;
-		}
-	} else {
-		skb->dev = slave_dev;
-	}
-
+	skb->dev = slave_dev;
 	skb->priority = 1;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
···
 			bond_do_fail_over_mac(bond, new_active,
 					      old_active);

-		bond->send_grat_arp = bond->params.num_grat_arp;
-		bond_send_gratuitous_arp(bond);
+		if (netif_running(bond->dev)) {
+			bond->send_grat_arp = bond->params.num_grat_arp;
+			bond_send_gratuitous_arp(bond);

-		bond->send_unsol_na = bond->params.num_unsol_na;
-		bond_send_unsolicited_na(bond);
+			bond->send_unsol_na = bond->params.num_unsol_na;
+			bond_send_unsolicited_na(bond);
+		}

 		write_unlock_bh(&bond->curr_slave_lock);
 		read_unlock(&bond->lock);
···

 	/* resend IGMP joins since active slave has changed or
 	 * all were sent on curr_active_slave */
-	if ((USES_PRIMARY(bond->params.mode) && new_active) ||
-	    bond->params.mode == BOND_MODE_ROUNDROBIN) {
+	if (((USES_PRIMARY(bond->params.mode) && new_active) ||
+	     bond->params.mode == BOND_MODE_ROUNDROBIN) &&
+	    netif_running(bond->dev)) {
 		bond->igmp_retrans = bond->params.resend_igmp;
 		queue_delayed_work(bond->wq, &bond->mcast_work, 0);
 	}
+2 -2 drivers/net/bonding/bonding.h
+2 -2 drivers/net/epic100.c
···

 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
+		struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz + 2);
 		ep->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
···
 		entry = ep->dirty_rx % RX_RING_SIZE;
 		if (ep->rx_skbuff[entry] == NULL) {
 			struct sk_buff *skb;
-			skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
+			skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz + 2);
 			if (skb == NULL)
 				break;
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+2 -2 drivers/net/hamachi.c
···
 	}
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
+		struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
 		hmp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
···
 		entry = hmp->dirty_rx % RX_RING_SIZE;
 		desc = &(hmp->rx_ring[entry]);
 		if (hmp->rx_skbuff[entry] == NULL) {
-			struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
+			struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);

 			hmp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
+1 drivers/net/pcmcia/axnet_cs.c
···
 static struct pcmcia_device_id axnet_ids[] = {
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081),
 	PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301),
+	PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
 	PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0301),
 	PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0303),
 	PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
-1 drivers/net/pcmcia/pcnet_cs.c
···
 	PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530),
 	PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab),
 	PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110),
-	PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
 	PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041),
 	PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452),
 	PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300),
+2 -2 drivers/net/sundance.c
···

 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2);
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
···
 		struct sk_buff *skb;
 		entry = np->dirty_rx % RX_RING_SIZE;
 		if (np->rx_skbuff[entry] == NULL) {
-			skb = dev_alloc_skb(np->rx_buf_sz);
+			skb = dev_alloc_skb(np->rx_buf_sz + 2);
 			np->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;		/* Better luck next round. */
+2 -2 drivers/net/tehuti.c
···
 	ENTER;
 	master = READ_REG(priv, regINIT_SEMAPHORE);
 	if (!READ_REG(priv, regINIT_STATUS) && master) {
-		rc = request_firmware(&fw, "tehuti/firmware.bin", &priv->pdev->dev);
+		rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
 		if (rc)
 			goto out;
 		bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
···
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(BDX_DRV_DESC);
-MODULE_FIRMWARE("tehuti/firmware.bin");
+MODULE_FIRMWARE("tehuti/bdx.bin");
-1 drivers/net/typhoon.c
+4 drivers/net/usb/asix.c
···
 	USB_DEVICE (0x0b95, 0x1780),
 	.driver_info = (unsigned long) &ax88178_info,
 }, {
+	// Logitec LAN-GTJ/U2A
+	USB_DEVICE (0x0789, 0x0160),
+	.driver_info = (unsigned long) &ax88178_info,
+}, {
 	// Linksys USB200M Rev 2
 	USB_DEVICE (0x13b1, 0x0018),
 	.driver_info = (unsigned long) &ax88772_info,
+11 -3 drivers/net/usb/mcs7830.c
···
 /*
- * MOSCHIP MCS7830 based USB 2.0 Ethernet Devices
+ * MOSCHIP MCS7830 based (7730/7830/7832) USB 2.0 Ethernet Devices
 *
 * based on usbnet.c, asix.c and the vendor provided mcs7830 driver
 *
···
 * Copyright (c) 2002-2003 TiVo Inc.
 *
 * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!).
+ *
+ * 2010-12-19: add 7832 USB PID ("functionality same as MCS7830"),
+ *             per active notification by manufacturer
 *
 * TODO:
 * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?)
···
 #define MCS7830_MAX_MCAST	64

 #define MCS7830_VENDOR_ID	0x9710
+#define MCS7832_PRODUCT_ID	0x7832
 #define MCS7830_PRODUCT_ID	0x7830
 #define MCS7730_PRODUCT_ID	0x7730

···
 	if (!ret)
 		ret = mcs7830_write_phy(dev, MII_BMCR,
 				BMCR_ANENABLE | BMCR_ANRESTART	);
-	return ret < 0 ? : 0;
+	return ret;
 }

···
 }

 static const struct driver_info moschip_info = {
-	.description	= "MOSCHIP 7830/7730 usb-NET adapter",
+	.description	= "MOSCHIP 7830/7832/7730 usb-NET adapter",
 	.bind		= mcs7830_bind,
 	.rx_fixup	= mcs7830_rx_fixup,
 	.flags		= FLAG_ETHER,
···
 };

 static const struct usb_device_id products[] = {
+	{
+		USB_DEVICE(MCS7830_VENDOR_ID, MCS7832_PRODUCT_ID),
+		.driver_info = (unsigned long) &moschip_info,
+	},
 	{
 		USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID),
 		.driver_info = (unsigned long) &moschip_info,
+3 -1 drivers/net/veth.c
···
 	if (!(rcv->flags & IFF_UP))
 		goto tx_drop;

-	if (dev->features & NETIF_F_NO_CSUM)
+	/* don't change ip_summed == CHECKSUM_PARTIAL, as that
+	   will cause bad checksum on forwarded packets */
+	if (skb->ip_summed == CHECKSUM_NONE)
 		skb->ip_summed = rcv_priv->ip_summed;

 	length = skb->len + ETH_HLEN;
-1 drivers/net/wireless/hostap/hostap_main.c
+2 drivers/net/wireless/iwlwifi/iwl-1000.c
···
 	.mod_params = &iwlagn_mod_params,
 	.base_params = &iwl1000_base_params,
 	.ht_params = &iwl1000_ht_params,
+	.use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl100_bg_cfg = {
···
 	.ops = &iwl1000_ops,
 	.mod_params = &iwlagn_mod_params,
 	.base_params = &iwl1000_base_params,
+	.use_new_eeprom_reading = true,
 };

 MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
+12 drivers/net/wireless/iwlwifi/iwl-6000.c
···
 	.ht_params = &iwl6000_ht_params,
 	.need_dc_calib = true,
 	.need_temp_offset_calib = true,
+	.use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2a_2abg_cfg = {
···
 	.base_params = &iwl6000_base_params,
 	.need_dc_calib = true,
 	.need_temp_offset_calib = true,
+	.use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2a_2bg_cfg = {
···
 	.base_params = &iwl6000_base_params,
 	.need_dc_calib = true,
 	.need_temp_offset_calib = true,
+	.use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2b_2agn_cfg = {
···
 	.need_temp_offset_calib = true,
 	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2b_2abg_cfg = {
···
 	.need_temp_offset_calib = true,
 	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2b_2bgn_cfg = {
···
 	.need_temp_offset_calib = true,
 	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2b_2bg_cfg = {
···
 	.need_temp_offset_calib = true,
 	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2b_bgn_cfg = {
···
 	.need_temp_offset_calib = true,
 	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2b_bg_cfg = {
···
 	.need_temp_offset_calib = true,
 	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.use_new_eeprom_reading = true,
 };

 /*
···
 	.base_params = &iwl6050_base_params,
 	.ht_params = &iwl6000_ht_params,
 	.need_dc_calib = true,
+	.use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6050_2abg_cfg = {
···
 	.need_dc_calib = true,
 	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl130_bg_cfg = {
···
 	.need_dc_calib = true,
 	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 	.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+	.use_new_eeprom_reading = true,
 };

 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+86 -2 drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
···
 /**
  * iwlcore_eeprom_enhanced_txpower: process enhanced tx power info
  */
-void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
+static void iwlcore_eeprom_enhanced_txpower_old(struct iwl_priv *priv)
 {
 	int eeprom_section_count = 0;
 	int section, element;
···
 	 * always check for valid entry before process
 	 * the information
 	 */
-	if (!enhanced_txpower->common || enhanced_txpower->reserved)
+	if (!(enhanced_txpower->flags || enhanced_txpower->channel) ||
+	    enhanced_txpower->delta_20_in_40)
 		continue;
 
 	for (element = 0; element < eeprom_section_count; element++) {
···
 				max_txpower_in_half_dbm;
 		}
 	}
+}
+
+static void
+iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv,
+				    struct iwl_eeprom_enhanced_txpwr *txp,
+				    s8 max_txpower_avg)
+{
+	int ch_idx;
+	bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ;
+	enum ieee80211_band band;
+
+	band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
+		IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+
+	for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) {
+		struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx];
+
+		/* update matching channel or from common data only */
+		if (txp->channel != 0 && ch_info->channel != txp->channel)
+			continue;
+
+		/* update matching band only */
+		if (band != ch_info->band)
+			continue;
+
+		if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) {
+			ch_info->max_power_avg = max_txpower_avg;
+			ch_info->curr_txpow = max_txpower_avg;
+			ch_info->scan_power = max_txpower_avg;
+		}
+
+		if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg)
+			ch_info->ht40_max_power_avg = max_txpower_avg;
+	}
+}
+
+#define EEPROM_TXP_OFFS	(0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
+#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
+#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
+
+static void iwlcore_eeprom_enhanced_txpower_new(struct iwl_priv *priv)
+{
+	struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
+	int idx, entries;
+	__le16 *txp_len;
+	s8 max_txp_avg, max_txp_avg_halfdbm;
+
+	BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
+
+	/* the length is in 16-bit words, but we want entries */
+	txp_len = (__le16 *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
+	entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
+
+	txp_array = (void *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
+	for (idx = 0; idx < entries; idx++) {
+		txp = &txp_array[idx];
+
+		/* skip invalid entries */
+		if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
+			continue;
+
+		max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
+						      &max_txp_avg_halfdbm);
+
+		/*
+		 * Update the user limit values values to the highest
+		 * power supported by any channel
+		 */
+		if (max_txp_avg > priv->tx_power_user_lmt)
+			priv->tx_power_user_lmt = max_txp_avg;
+		if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
+			priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
+
+		iwlcore_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
+	}
+}
+
+void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
+{
+	if (priv->cfg->use_new_eeprom_reading)
+		iwlcore_eeprom_enhanced_txpower_new(priv);
+	else
+		iwlcore_eeprom_enhanced_txpower_old(priv);
 }
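
A quick sanity check on the entry arithmetic in iwlcore_eeprom_enhanced_txpower_new(): the size word read via EEPROM_TXP_SZ_OFFS counts 16-bit words, while each enhanced-txpower record is 8 bytes (as the BUILD_BUG_ON asserts), hence entries = words * 2 / 8. A standalone sketch of that conversion, using a made-up size word rather than a live EEPROM read:

    #include <stdio.h>
    #include <stdint.h>

    #define ENTRY_LEN 8  /* sizeof(struct iwl_eeprom_enhanced_txpwr) */

    int main(void)
    {
            uint16_t txp_len_words = 24;                  /* hypothetical EEPROM size word */
            int entries = txp_len_words * 2 / ENTRY_LEN;  /* words -> bytes -> records */

            printf("%d entries\n", entries);              /* 24 words = 48 bytes = 6 records */
            return 0;
    }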
+6 drivers/net/wireless/iwlwifi/iwl-agn-lib.c
···
 	case INDIRECT_REGULATORY:
 		offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
 		break;
+	case INDIRECT_TXP_LIMIT:
+		offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
+		break;
+	case INDIRECT_TXP_LIMIT_SIZE:
+		offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
+		break;
 	case INDIRECT_CALIBRATION:
 		offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
 		break;
+1 drivers/net/wireless/iwlwifi/iwl-core.h
+21 -4 drivers/net/wireless/iwlwifi/iwl-eeprom.h
···
 	s8 max_power_avg;	/* max power (dBm) on this chnl, limit 31 */
 } __packed;
 
+enum iwl_eeprom_enhanced_txpwr_flags {
+	IWL_EEPROM_ENH_TXP_FL_VALID		= BIT(0),
+	IWL_EEPROM_ENH_TXP_FL_BAND_52G		= BIT(1),
+	IWL_EEPROM_ENH_TXP_FL_OFDM		= BIT(2),
+	IWL_EEPROM_ENH_TXP_FL_40MHZ		= BIT(3),
+	IWL_EEPROM_ENH_TXP_FL_HT_AP		= BIT(4),
+	IWL_EEPROM_ENH_TXP_FL_RES1		= BIT(5),
+	IWL_EEPROM_ENH_TXP_FL_RES2		= BIT(6),
+	IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE	= BIT(7),
+};
+
 /**
  * iwl_eeprom_enhanced_txpwr structure
  *    This structure presents the enhanced regulatory tx power limit layout
···
  *    Enhanced regulatory tx power portion of eeprom image can be broken down
  *    into individual structures; each one is 8 bytes in size and contain the
  *    following information
- * @common: (desc + channel) not used by driver, should _NOT_ be "zero"
+ * @flags: entry flags
+ * @channel: channel number
  * @chain_a_max_pwr: chain a max power in 1/2 dBm
  * @chain_b_max_pwr: chain b max power in 1/2 dBm
  * @chain_c_max_pwr: chain c max power in 1/2 dBm
- * @reserved: not used, should be "zero"
+ * @delta_20_in_40: 20-in-40 deltas (hi/lo)
  * @mimo2_max_pwr: mimo2 max power in 1/2 dBm
  * @mimo3_max_pwr: mimo3 max power in 1/2 dBm
  *
 */
 struct iwl_eeprom_enhanced_txpwr {
-	__le16 common;
+	u8 flags;
+	u8 channel;
 	s8 chain_a_max;
 	s8 chain_b_max;
 	s8 chain_c_max;
-	s8 reserved;
+	u8 delta_20_in_40;
 	s8 mimo2_max;
 	s8 mimo3_max;
 } __packed;
···
 #define EEPROM_LINK_CALIBRATION      (2*0x67)
 #define EEPROM_LINK_PROCESS_ADJST    (2*0x68)
 #define EEPROM_LINK_OTHERS           (2*0x69)
+#define EEPROM_LINK_TXP_LIMIT        (2*0x6a)
+#define EEPROM_LINK_TXP_LIMIT_SIZE   (2*0x6b)
 
 /* agn regulatory - indirect access */
 #define EEPROM_REG_BAND_1_CHANNELS       ((0x08)\
···
 #define INDIRECT_CALIBRATION        0x00040000
 #define INDIRECT_PROCESS_ADJST      0x00050000
 #define INDIRECT_OTHERS             0x00060000
+#define INDIRECT_TXP_LIMIT          0x00070000
+#define INDIRECT_TXP_LIMIT_SIZE     0x00080000
 #define INDIRECT_ADDRESS            0x00100000
 
 /* General */
+1 -1 drivers/net/wireless/libertas/cfg.c
+6 drivers/net/wireless/p54/p54usb.c
···
 
 static struct usb_device_id p54u_table[] __devinitdata = {
 	/* Version 1 devices (pci chip + net2280) */
+	{USB_DEVICE(0x0411, 0x0050)},	/* Buffalo WLI2-USB2-G54 */
 	{USB_DEVICE(0x045e, 0x00c2)},	/* Microsoft MN-710 */
 	{USB_DEVICE(0x0506, 0x0a11)},	/* 3COM 3CRWE254G72 */
 	{USB_DEVICE(0x06b9, 0x0120)},	/* Thomson SpeedTouch 120g */
···
 	{USB_DEVICE(0x0846, 0x4220)},	/* Netgear WG111 */
 	{USB_DEVICE(0x09aa, 0x1000)},	/* Spinnaker Proto board */
 	{USB_DEVICE(0x0cde, 0x0006)},	/* Medion 40900, Roper Europe */
+	{USB_DEVICE(0x0db0, 0x6826)},	/* MSI UB54G (MS-6826) */
 	{USB_DEVICE(0x107b, 0x55f2)},	/* Gateway WGU-210 (Gemtek) */
 	{USB_DEVICE(0x124a, 0x4023)},	/* Shuttle PN15, Airvast WM168g, IOGear GWU513 */
+	{USB_DEVICE(0x1435, 0x0210)},	/* Inventel UR054G */
+	{USB_DEVICE(0x15a9, 0x0002)},	/* Gemtek WUBI-100GW 802.11g */
 	{USB_DEVICE(0x1630, 0x0005)},	/* 2Wire 802.11g USB (v1) / Z-Com */
+	{USB_DEVICE(0x182d, 0x096b)},	/* Sitecom WL-107 */
 	{USB_DEVICE(0x1915, 0x2234)},	/* Linksys WUSB54G OEM */
 	{USB_DEVICE(0x1915, 0x2235)},	/* Linksys WUSB54G Portable OEM */
 	{USB_DEVICE(0x2001, 0x3701)},	/* DLink DWL-G120 Spinnaker */
···
 	{USB_DEVICE(0x1435, 0x0427)},	/* Inventel UR054G */
 	{USB_DEVICE(0x1668, 0x1050)},	/* Actiontec 802UIG-1 */
 	{USB_DEVICE(0x2001, 0x3704)},	/* DLink DWL-G122 rev A2 */
+	{USB_DEVICE(0x2001, 0x3705)},	/* D-Link DWL-G120 rev C1 */
 	{USB_DEVICE(0x413c, 0x5513)},	/* Dell WLA3310 USB Wireless Adapter */
 	{USB_DEVICE(0x413c, 0x8102)},	/* Spinnaker DUT */
 	{USB_DEVICE(0x413c, 0x8104)},	/* Cohiba Proto board */
+1 drivers/net/wireless/rt2x00/rt2800pci.c
···
 	__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
 	__set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
 	__set_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags);
+	__set_bit(DRIVER_REQUIRE_TASKLET_CONTEXT, &rt2x00dev->flags);
 	if (!modparam_nohwcrypt)
 		__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
 	__set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
+1 drivers/net/wireless/rt2x00/rt2x00.h
+6 -3 drivers/net/wireless/rt2x00/rt2x00dev.c
···
 	 * through a mac80211 library call (RTS/CTS) then we should not
 	 * send the status report back.
 	 */
-	if (!(skbdesc_flags & SKBDESC_NOT_MAC80211))
-		ieee80211_tx_status(rt2x00dev->hw, entry->skb);
-	else
+	if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
+		if (test_bit(DRIVER_REQUIRE_TASKLET_CONTEXT, &rt2x00dev->flags))
+			ieee80211_tx_status(rt2x00dev->hw, entry->skb);
+		else
+			ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
+	} else
 		dev_kfree_skb_any(entry->skb);
 
 	/*
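
The point of the new flag: ieee80211_tx_status() may only be called from interrupt/tasklet context, while ieee80211_tx_status_ni() wraps it with bottom-half protection for process context. A minimal kernel-side sketch of the same dispatch, assuming a plain boolean in place of the rt2x00 flag bit (so the fragment stays driver-agnostic):

    #include <net/mac80211.h>

    /* "in_tasklet" is a hypothetical flag standing in for
     * test_bit(DRIVER_REQUIRE_TASKLET_CONTEXT, ...) above. */
    static void complete_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
                            bool in_tasklet)
    {
            if (in_tasklet)
                    ieee80211_tx_status(hw, skb);    /* already in BH context */
            else
                    ieee80211_tx_status_ni(hw, skb); /* disables BHs itself */
    }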
+2 -2 drivers/net/yellowfin.c
···
 	}
 
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+		struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
 		yp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
···
 	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
 		entry = yp->dirty_rx % RX_RING_SIZE;
 		if (yp->rx_skbuff[entry] == NULL) {
-			struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+			struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			yp->rx_skbuff[entry] = skb;
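
The extra two bytes leave room for the common RX alignment trick: reserving 2 bytes ahead of the 14-byte Ethernet header puts the IP header on a 4-byte boundary without shrinking the usable buffer. A sketch of that idiom, assuming the driver reserves the slack right after allocation as most 10/100 drivers do:

    /* allocate 2 spare bytes, then shift the data pointer */
    struct sk_buff *skb = dev_alloc_skb(rx_buf_sz + 2);
    if (skb) {
            skb_reserve(skb, 2);  /* 14-byte MAC header -> IP header longword-aligned */
            /* ... map skb->data for DMA and hand it to the RX ring ... */
    }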
+1 -1 drivers/of/of_i2c.c
+5 -76 drivers/pci/bus.c
···
 	}
 }
 
-static bool pci_bus_resource_better(struct resource *res1, bool pos1,
-				    struct resource *res2, bool pos2)
-{
-	/* If exactly one is positive decode, always prefer that one */
-	if (pos1 != pos2)
-		return pos1 ? true : false;
-
-	/* Prefer the one that contains the highest address */
-	if (res1->end != res2->end)
-		return (res1->end > res2->end) ? true : false;
-
-	/* Otherwise, prefer the one with highest "center of gravity" */
-	if (res1->start != res2->start)
-		return (res1->start > res2->start) ? true : false;
-
-	/* Otherwise, choose one arbitrarily (but consistently) */
-	return (res1 > res2) ? true : false;
-}
-
-static bool pci_bus_resource_positive(struct pci_bus *bus, struct resource *res)
-{
-	struct pci_bus_resource *bus_res;
-
-	/*
-	 * This relies on the fact that pci_bus.resource[] refers to P2P or
-	 * CardBus bridge base/limit registers, which are always positively
-	 * decoded.  The pci_bus.resources list contains host bridge or
-	 * subtractively decoded resources.
-	 */
-	list_for_each_entry(bus_res, &bus->resources, list) {
-		if (bus_res->res == res)
-			return (bus_res->flags & PCI_SUBTRACTIVE_DECODE) ?
-				false : true;
-	}
-	return true;
-}
-
-/*
- * Find the next-best bus resource after the cursor "res".  If the cursor is
- * NULL, return the best resource.  "Best" means that we prefer positive
- * decode regions over subtractive decode, then those at higher addresses.
- */
-static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus,
-						   unsigned int type,
-						   struct resource *res)
-{
-	bool res_pos, r_pos, prev_pos = false;
-	struct resource *r, *prev = NULL;
-	int i;
-
-	res_pos = pci_bus_resource_positive(bus, res);
-	pci_bus_for_each_resource(bus, r, i) {
-		if (!r)
-			continue;
-
-		if ((r->flags & IORESOURCE_TYPE_BITS) != type)
-			continue;
-
-		r_pos = pci_bus_resource_positive(bus, r);
-		if (!res || pci_bus_resource_better(res, res_pos, r, r_pos)) {
-			if (!prev || pci_bus_resource_better(r, r_pos,
-							     prev, prev_pos)) {
-				prev = r;
-				prev_pos = r_pos;
-			}
-		}
-	}
-
-	return prev;
-}
-
 /**
  * pci_bus_alloc_resource - allocate a resource from a parent bus
  * @bus: PCI bus
···
 			  resource_size_t),
 		void *alignf_data)
 {
-	int ret = -ENOMEM;
+	int i, ret = -ENOMEM;
 	struct resource *r;
 	resource_size_t max = -1;
-	unsigned int type = res->flags & IORESOURCE_TYPE_BITS;
 
 	type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
 
···
 	if (!(res->flags & IORESOURCE_MEM_64))
 		max = PCIBIOS_MAX_MEM_32;
 
-	/* Look for space at highest addresses first */
-	r = pci_bus_find_resource_prev(bus, type, NULL);
-	for ( ; r; r = pci_bus_find_resource_prev(bus, type, r)) {
+	pci_bus_for_each_resource(bus, r, i) {
+		if (!r)
+			continue;
+
 		/* type_mask must match */
 		if ((res->flags ^ r->flags) & type_mask)
 			continue;
+5 drivers/pci/dmar.c
+2 -1 drivers/pci/hotplug/pciehp_acpi.c
···
 static int __init select_detection_mode(void)
 {
 	struct dummy_slot *slot, *tmp;
-	pcie_port_service_register(&dummy_driver);
+	if (pcie_port_service_register(&dummy_driver))
+		return PCIEHP_DETECT_ACPI;
 	pcie_port_service_unregister(&dummy_driver);
 	list_for_each_entry_safe(slot, tmp, &dummy_slots, list) {
 		list_del(&slot->list);
+26 drivers/pci/quirks.c
···
 {
 	u32 cfg;
 
+	if (!pci_find_capability(dev, PCI_CAP_ID_HT))
+		return;
+
 	pci_read_config_dword(dev, 0x74, &cfg);
 
 	if (cfg & ((1 << 2) | (1 << 15))) {
···
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
 #endif /*CONFIG_MMC_RICOH_MMC*/
 
+#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
+#define VTUNCERRMSK_REG	0x1ac
+#define VTD_MSK_SPEC_ERRORS	(1 << 31)
+/*
+ * This is a quirk for masking vt-d spec defined errors to platform error
+ * handling logic. With out this, platforms using Intel 7500, 5500 chipsets
+ * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based
+ * on the RAS config settings of the platform) when a vt-d fault happens.
+ * The resulting SMI caused the system to hang.
+ *
+ * VT-d spec related errors are already handled by the VT-d OS code, so no
+ * need to report the same error through other channels.
+ */
+static void vtd_mask_spec_errors(struct pci_dev *dev)
+{
+	u32 word;
+
+	pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
+	pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
+#endif
 
 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
 			  struct pci_fixup *end)
+1 -1 drivers/rtc/rtc-rs5c372.c
···
 static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
 {
 	struct rs5c372	*rs5c = i2c_get_clientdata(client);
-	unsigned char	buf[8];
+	unsigned char	buf[7];
 	int		addr;
 
 	dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d "
+2 -2 drivers/scsi/bfa/bfa_fcs.c
···
 	bfa_trc(fabric->fcs, event);
 	wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn);
 
-	BFA_LOG(KERN_INFO, bfad, log_level,
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"Port is isolated due to VF_ID mismatch. "
 		"PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.",
 		pwwn_ptr, fabric->fcs->port_vfid,
···
 	wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport));
 	wwn2str(fwwn_ptr,
 		bfa_fcs_lport_get_fabric_name(&fabric->bport));
-	BFA_LOG(KERN_WARNING, bfad, log_level,
+	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
 		"Base port WWN = %s Fabric WWN = %s\n",
 		pwwn_ptr, fwwn_ptr);
 }
+3 -3 drivers/scsi/bfa/bfa_fcs_fcpim.c
···
 		bfa_fcb_itnim_online(itnim->itnim_drv);
 		wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
 		wwn2str(rpwwn_buf, itnim->rport->pwwn);
-		BFA_LOG(KERN_INFO, bfad, log_level,
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"Target (WWN = %s) is online for initiator (WWN = %s)\n",
 		rpwwn_buf, lpwwn_buf);
 		break;
···
 		wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
 		wwn2str(rpwwn_buf, itnim->rport->pwwn);
 		if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE)
-			BFA_LOG(KERN_ERR, bfad, log_level,
+			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 			"Target (WWN = %s) connectivity lost for "
 			"initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
 		else
-			BFA_LOG(KERN_INFO, bfad, log_level,
+			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Target (WWN = %s) offlined by initiator (WWN = %s)\n",
 			rpwwn_buf, lpwwn_buf);
 		break;
+5 -5 drivers/scsi/bfa/bfa_fcs_lport.c
···
 	__port_action[port->fabric->fab_type].online(port);
 
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
-	BFA_LOG(KERN_INFO, bfad, log_level,
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"Logical port online: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
 
···
 
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
 	if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE)
-		BFA_LOG(KERN_ERR, bfad, log_level,
+		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 		"Logical port lost fabric connectivity: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
 	else
-		BFA_LOG(KERN_INFO, bfad, log_level,
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"Logical port taken offline: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
 
···
 	char	lpwwn_buf[BFA_STRING_32];
 
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
-	BFA_LOG(KERN_INFO, bfad, log_level,
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"Logical port deleted: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
 
···
 			vport ? vport->vport_drv : NULL);
 
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(lport));
-	BFA_LOG(KERN_INFO, bfad, log_level,
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"New logical port created: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
 
+3 -3 drivers/scsi/bfa/bfa_fcs_rport.c
···
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
 	wwn2str(rpwwn_buf, rport->pwwn);
 	if (!BFA_FCS_PID_IS_WKA(rport->pid))
-		BFA_LOG(KERN_INFO, bfad, log_level,
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"Remote port (WWN = %s) online for logical port (WWN = %s)\n",
 		rpwwn_buf, lpwwn_buf);
 }
···
 	wwn2str(rpwwn_buf, rport->pwwn);
 	if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
 		if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE)
-			BFA_LOG(KERN_ERR, bfad, log_level,
+			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 				"Remote port (WWN = %s) connectivity lost for "
 				"logical port (WWN = %s)\n",
 				rpwwn_buf, lpwwn_buf);
 		else
-			BFA_LOG(KERN_INFO, bfad, log_level,
+			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 				"Remote port (WWN = %s) offlined by "
 				"logical port (WWN = %s)\n",
 				rpwwn_buf, lpwwn_buf);
+4 -4 drivers/scsi/bfa/bfa_ioc.c
···
 
 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
 	bfa_ioc_hb_monitor(ioc);
-	BFA_LOG(KERN_INFO, bfad, log_level, "IOC enabled\n");
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
 }
 
 static void
···
 {
 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 	bfa_iocpf_disable(ioc);
-	BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
 }
 
 /*
···
 		notify->cbfn(notify->cbarg);
 	}
 
-	BFA_LOG(KERN_CRIT, bfad, log_level,
+	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
 		"Heart Beat of IOC has failed\n");
 }
 
···
 	 * Provide enable completion callback.
 	 */
 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-	BFA_LOG(KERN_WARNING, bfad, log_level,
+	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
 		"Running firmware version is incompatible "
 		"with the driver version\n");
 }
+14 -14 drivers/scsi/bfa/bfa_svc.c
···
 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
 		wwn2str(pwwn_buf, fcport->pwwn);
-		BFA_LOG(KERN_INFO, bfad, log_level,
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port disabled: WWN = %s\n", pwwn_buf);
 		break;
···
 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
 		wwn2str(pwwn_buf, fcport->pwwn);
-		BFA_LOG(KERN_INFO, bfad, log_level,
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port disabled: WWN = %s\n", pwwn_buf);
 		break;
···
 
 		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
 		wwn2str(pwwn_buf, fcport->pwwn);
-		BFA_LOG(KERN_INFO, bfad, log_level,
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port online: WWN = %s\n", pwwn_buf);
 		break;
···
 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
 		wwn2str(pwwn_buf, fcport->pwwn);
-		BFA_LOG(KERN_INFO, bfad, log_level,
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port disabled: WWN = %s\n", pwwn_buf);
 		break;
···
 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
 		wwn2str(pwwn_buf, fcport->pwwn);
-		BFA_LOG(KERN_INFO, bfad, log_level,
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port offline: WWN = %s\n", pwwn_buf);
-		BFA_LOG(KERN_INFO, bfad, log_level,
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port disabled: WWN = %s\n", pwwn_buf);
 		break;
···
 				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
 		wwn2str(pwwn_buf, fcport->pwwn);
 		if (BFA_PORT_IS_DISABLED(fcport->bfa))
-			BFA_LOG(KERN_INFO, bfad, log_level,
+			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 				"Base port offline: WWN = %s\n", pwwn_buf);
 		else
-			BFA_LOG(KERN_ERR, bfad, log_level,
+			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 				"Base port (WWN = %s) "
 				"lost fabric connectivity\n", pwwn_buf);
 		break;
···
 		bfa_fcport_reset_linkinfo(fcport);
 		wwn2str(pwwn_buf, fcport->pwwn);
 		if (BFA_PORT_IS_DISABLED(fcport->bfa))
-			BFA_LOG(KERN_INFO, bfad, log_level,
+			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 				"Base port offline: WWN = %s\n", pwwn_buf);
 		else
-			BFA_LOG(KERN_ERR, bfad, log_level,
+			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 				"Base port (WWN = %s) "
 				"lost fabric connectivity\n", pwwn_buf);
 		break;
···
 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
 		wwn2str(pwwn_buf, fcport->pwwn);
 		if (BFA_PORT_IS_DISABLED(fcport->bfa))
-			BFA_LOG(KERN_INFO, bfad, log_level,
+			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 				"Base port offline: WWN = %s\n", pwwn_buf);
 		else
-			BFA_LOG(KERN_ERR, bfad, log_level,
+			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 				"Base port (WWN = %s) "
 				"lost fabric connectivity\n", pwwn_buf);
 		break;
···
 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
 		wwn2str(pwwn_buf, fcport->pwwn);
-		BFA_LOG(KERN_INFO, bfad, log_level,
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port enabled: WWN = %s\n", pwwn_buf);
 		break;
···
 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
 		wwn2str(pwwn_buf, fcport->pwwn);
-		BFA_LOG(KERN_INFO, bfad, log_level,
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"Base port enabled: WWN = %s\n", pwwn_buf);
 		break;
 
+4 -4 drivers/scsi/bfa/bfad.c
···
 int	rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
 int	bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
 int	bfa_io_max_sge = BFAD_IO_MAX_SGE;
-int	log_level = 3; /* WARNING log level */
+int	bfa_log_level = 3; /* WARNING log level */
 int	ioc_auto_recover = BFA_TRUE;
 int	bfa_linkup_delay = -1;
 int	fdmi_enable = BFA_TRUE;
···
 MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
 module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
-module_param(log_level, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(log_level, "Driver log level, default=3, "
+module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
 				"Range[Critical:1|Error:2|Warning:3|Info:4]");
 module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
···
 	} else
 		bfad_os_rport_online_wait(bfad);
 
-	BFA_LOG(KERN_INFO, bfad, log_level, "bfa device claimed\n");
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
 
 	return BFA_STATUS_OK;
 }
+1 -1 drivers/scsi/bfa/bfad_drv.h
+11 -10 drivers/scsi/bfa/bfad_im.c
···
 	}
 
 	bfa_trc(bfad, hal_io->iotag);
-	BFA_LOG(KERN_INFO, bfad, log_level, "scsi%d: abort cmnd %p iotag %x\n",
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+		"scsi%d: abort cmnd %p iotag %x\n",
 		im_port->shost->host_no, cmnd, hal_io->iotag);
 	(void) bfa_ioim_abort(hal_io);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
···
 
 	cmnd->scsi_done(cmnd);
 	bfa_trc(bfad, hal_io->iotag);
-	BFA_LOG(KERN_INFO, bfad, log_level,
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"scsi%d: complete abort 0x%p iotag 0x%x\n",
 		im_port->shost->host_no, cmnd, hal_io->iotag);
 	return SUCCESS;
···
 
 	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
 	if (!tskim) {
-		BFA_LOG(KERN_ERR, bfad, log_level,
+		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 			"target reset, fail to allocate tskim\n");
 		rc = BFA_STATUS_FAILED;
 		goto out;
···
 
 	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
 	if (!tskim) {
-		BFA_LOG(KERN_ERR, bfad, log_level,
+		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 			"LUN reset, fail to allocate tskim");
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 		rc = FAILED;
···
 
 	task_status = cmnd->SCp.Status >> 1;
 	if (task_status != BFI_TSKIM_STS_OK) {
-		BFA_LOG(KERN_ERR, bfad, log_level,
+		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 			"LUN reset failure, status: %d\n", task_status);
 		rc = FAILED;
 	}
···
 
 		task_status = cmnd->SCp.Status >> 1;
 		if (task_status != BFI_TSKIM_STS_OK) {
-			BFA_LOG(KERN_ERR, bfad, log_level,
+			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 				"target reset failure,"
 				" status: %d\n", task_status);
 			err_cnt++;
···
 	fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
 	wwn2str(wwpn_str, wwpn);
 	fcid2str(fcid_str, fcid);
-	BFA_LOG(KERN_INFO, bfad, log_level,
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 		"ITNIM FREE scsi%d: FCID: %s WWPN: %s\n",
 		port->im_port->shost->host_no,
 		fcid_str, wwpn_str);
···
 bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
 {
 	bfa_trc(bfad, bfad->inst_no);
-	BFA_LOG(KERN_INFO, bfad, log_level, "Free scsi%d\n",
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n",
 			im_port->shost->host_no);
 
 	fc_remove_host(im_port->shost);
···
 		fcid2str(fcid_str, fcid);
 		list_add_tail(&itnim->list_entry,
 			&im_port->itnim_mapped_list);
-		BFA_LOG(KERN_INFO, bfad, log_level,
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"ITNIM ONLINE Target: %d:0:%d "
 			"FCID: %s WWPN: %s\n",
 			im_port->shost->host_no,
···
 		wwn2str(wwpn_str, wwpn);
 		fcid2str(fcid_str, fcid);
 		list_del(&itnim->list_entry);
-		BFA_LOG(KERN_INFO, bfad, log_level,
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
 			"ITNIM OFFLINE Target: %d:0:%d "
 			"FCID: %s WWPN: %s\n",
 			im_port->shost->host_no,
+1 -2 drivers/scsi/scsi_lib.c
···
 
 	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
-	/* New queue, no concurrency on queue_flags */
 	if (!shost->use_clustering)
-		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+		q->limits.cluster = 0;
 
 	/*
 	 * set a reasonable default alignment on word boundaries: the
+1 drivers/sh/intc/core.c
+1 -1 drivers/spi/mpc52xx_spi.c
+1 -2 drivers/spi/spi.c
···
 	list_del(&master->list);
 	mutex_unlock(&board_lock);
 
-	dummy = device_for_each_child(master->dev.parent, &master->dev,
-					__unregister);
+	dummy = device_for_each_child(&master->dev, NULL, __unregister);
 	device_unregister(&master->dev);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_master);
+25 -10 drivers/spi/spi_fsl_espi.c
···
 	return mpc8xxx_spi->count;
 }
 
-static void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd)
+static inline void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd)
 {
-	if (cmd[1] && cmd[2] && cmd[3]) {
+	if (cmd) {
 		cmd[1] = (u8)(addr >> 16);
 		cmd[2] = (u8)(addr >> 8);
 		cmd[3] = (u8)(addr >> 0);
 	}
 }
 
-static unsigned int fsl_espi_cmd2addr(u8 *cmd)
+static inline unsigned int fsl_espi_cmd2addr(u8 *cmd)
 {
-	if (cmd[1] && cmd[2] && cmd[3])
+	if (cmd)
 		return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0;
 
 	return 0;
···
 		}
 	}
 
-	addr = fsl_espi_cmd2addr(local_buf);
-	addr += pos;
-	fsl_espi_addr2cmd(addr, local_buf);
+	if (pos > 0) {
+		addr = fsl_espi_cmd2addr(local_buf);
+		addr += pos;
+		fsl_espi_addr2cmd(addr, local_buf);
+	}
 
 	espi_trans->n_tx = n_tx;
 	espi_trans->n_rx = trans_len;
···
 
 	/* We need handle RX first */
 	if (events & SPIE_NE) {
-		u32 rx_data;
+		u32 rx_data, tmp;
+		u8 rx_data_8;
 
 		/* Spin until RX is done */
 		while (SPIE_RXCNT(events) < min(4, mspi->len)) {
 			cpu_relax();
 			events = mpc8xxx_spi_read_reg(&reg_base->event);
 		}
-		mspi->len -= 4;
 
-		rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
+		if (mspi->len >= 4) {
+			rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
+		} else {
+			tmp = mspi->len;
+			rx_data = 0;
+			while (tmp--) {
+				rx_data_8 = in_8((u8 *)&reg_base->receive);
+				rx_data |= (rx_data_8 << (tmp * 8));
+			}
+
+			rx_data <<= (4 - mspi->len) * 8;
+		}
+
+		mspi->len -= 4;
 
 		if (mspi->rx)
 			mspi->get_rx(rx_data, mspi);
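
For transfers shorter than a FIFO word the receive register is now drained byte-wise and the result left-justified into the 32-bit value that get_rx() expects. A standalone recomputation of that assembly for a 3-byte read, with fixed bytes standing in for the FIFO:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            const uint8_t fifo[3] = { 0xAA, 0xBB, 0xCC };  /* pretend FIFO bytes */
            unsigned len = 3, i = 0;
            unsigned tmp = len;
            uint32_t rx_data = 0;

            while (tmp--)                          /* same assembly as the driver */
                    rx_data |= (uint32_t)fifo[i++] << (tmp * 8);

            rx_data <<= (4 - len) * 8;             /* left-justify to 32 bits */
            printf("0x%08X\n", rx_data);           /* prints 0xAABBCC00 */
            return 0;
    }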
+4 -4 drivers/staging/cx25821/cx25821-video.c
···
 	return ARRAY_SIZE(formats);
 }
 
-struct cx25821_fmt *format_by_fourcc(unsigned int fourcc)
+struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc)
 {
 	unsigned int i;
 
···
 	pix_format =
 	    (dev->channels[ch_id].pixel_formats ==
 	     PIXEL_FRMT_411) ? V4L2_PIX_FMT_Y41P : V4L2_PIX_FMT_YUYV;
-	fh->fmt = format_by_fourcc(pix_format);
+	fh->fmt = cx25821_format_by_fourcc(pix_format);
 
 	v4l2_prio_open(&dev->channels[ch_id].prio, &fh->prio);
 
···
 	if (0 != err)
 		return err;
 
-	fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+	fh->fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
 	fh->vidq.field = f->fmt.pix.field;
 
 	/* check if width and height is valid based on set standard */
···
 	enum v4l2_field field;
 	unsigned int maxw, maxh;
 
-	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+	fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
 	if (NULL == fmt)
 		return -EINVAL;
 
+1 -1 drivers/staging/cx25821/cx25821-video.h
···
 
 #define FORMAT_FLAGS_PACKED       0x01
 extern struct cx25821_fmt formats[];
-extern struct cx25821_fmt *format_by_fourcc(unsigned int fourcc);
+extern struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc);
 extern struct cx25821_data timeout_data[MAX_VID_CHANNEL_NUM];
 
 extern void cx25821_dump_video_queue(struct cx25821_dev *dev,
+4 -2 drivers/tty/n_gsm.c
···
 	if (msg->len < 128)
 		*--dp = (msg->len << 1) | EA;
 	else {
-		*--dp = ((msg->len & 127) << 1) | EA;
-		*--dp = (msg->len >> 6) & 0xfe;
+		*--dp = (msg->len >> 7);	/* bits 7 - 15 */
+		*--dp = (msg->len & 127) << 1;	/* bits 0 - 6 */
 	}
 }
 
···
 {
 	struct gsm_msg *msg;
 	msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype);
+	if (msg == NULL)
+		return;
 	msg->data[0] = (cmd & 0xFE) << 1 | EA;	/* Clear C/R */
 	msg->data[1] = (dlen << 1) | EA;
 	memcpy(msg->data + 2, data, dlen);
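
For lengths of 128 and above the frame length is carried in two octets: the first on the wire holds the low seven bits shifted left with the EA bit clear (marking an extension), the second holds the remaining high bits as a plain byte. Because the header is built with *--dp, the high byte is written first. A standalone round-trip check of the corrected encoding (300 is an arbitrary length):

    #include <stdio.h>

    int main(void)
    {
            unsigned int len = 300;                 /* arbitrary length >= 128 */
            unsigned char hi, lo;

            hi = len >> 7;                          /* bits 7 - 15 */
            lo = (len & 127) << 1;                  /* bits 0 - 6, EA bit clear */

            unsigned int decoded = (lo >> 1) | ((unsigned int)hi << 7);
            printf("%u -> %02x %02x -> %u\n", len,
                   (unsigned)lo, (unsigned)hi, decoded);  /* 300 -> 58 02 -> 300 */
            return 0;
    }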
+9 -1 drivers/usb/core/Kconfig
···
 	  If you are unsure about this, say N here.
 
 config USB_OTG
-	bool
+	bool "OTG support"
 	depends on USB && EXPERIMENTAL
 	depends on USB_SUSPEND
 	default n
+	help
+	  The most notable feature of USB OTG is support for a
+	  "Dual-Role" device, which can act as either a device
+	  or a host. The initial role is decided by the type of
+	  plug inserted and can be changed later when two dual
+	  role devices talk to each other.
+
+	  Select this only if your board has Mini-AB/Micro-AB
+	  connector.
 
 config USB_OTG_WHITELIST
 	bool "Rely on OTG Targeted Peripherals List"
+9 -9 drivers/usb/gadget/composite.c
···
 		kfree(cdev->req->buf);
 		usb_ep_free_request(gadget->ep0, cdev->req);
 	}
+	device_remove_file(&gadget->dev, &dev_attr_suspended);
 	kfree(cdev);
 	set_gadget_data(gadget, NULL);
-	device_remove_file(&gadget->dev, &dev_attr_suspended);
 	composite = NULL;
 }
 
···
 	 */
 	usb_ep_autoconfig_reset(cdev->gadget);
 
-	/* standardized runtime overrides for device ID data */
-	if (idVendor)
-		cdev->desc.idVendor = cpu_to_le16(idVendor);
-	if (idProduct)
-		cdev->desc.idProduct = cpu_to_le16(idProduct);
-	if (bcdDevice)
-		cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
-
 	/* composite gadget needs to assign strings for whole device (like
 	 * serial number), register function drivers, potentially update
 	 * power state and consumption, etc
···
 
 	cdev->desc = *composite->dev;
 	cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
+
+	/* standardized runtime overrides for device ID data */
+	if (idVendor)
+		cdev->desc.idVendor = cpu_to_le16(idVendor);
+	if (idProduct)
+		cdev->desc.idProduct = cpu_to_le16(idProduct);
+	if (bcdDevice)
+		cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
 
 	/* stirng overrides */
 	if (iManufacturer || !cdev->desc.iManufacturer) {
+15 -10 drivers/usb/host/xhci-mem.c
···
 				xhci->port_array[i] = (u8) -1;
 			}
 			/* FIXME: Should we disable the port? */
+			continue;
 		}
 		xhci->port_array[i] = major_revision;
 		if (major_revision == 0x03)
···
 			return -ENOMEM;
 
 		port_index = 0;
-		for (i = 0; i < num_ports; i++)
-			if (xhci->port_array[i] != 0x03) {
-				xhci->usb2_ports[port_index] =
-					&xhci->op_regs->port_status_base +
-					NUM_PORT_REGS*i;
-				xhci_dbg(xhci, "USB 2.0 port at index %u, "
-						"addr = %p\n", i,
-						xhci->usb2_ports[port_index]);
-				port_index++;
-			}
+		for (i = 0; i < num_ports; i++) {
+			if (xhci->port_array[i] == 0x03 ||
+			    xhci->port_array[i] == 0 ||
+			    xhci->port_array[i] == -1)
+				continue;
+
+			xhci->usb2_ports[port_index] =
+				&xhci->op_regs->port_status_base +
+				NUM_PORT_REGS*i;
+			xhci_dbg(xhci, "USB 2.0 port at index %u, "
+					"addr = %p\n", i,
+					xhci->usb2_ports[port_index]);
+			port_index++;
+		}
 	}
 	if (xhci->num_usb3_ports) {
 		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
+3 -1 drivers/usb/misc/uss720.c
···
 /*
  * uss720.c  --  USS720 USB Parport Cable.
  *
- * Copyright (C) 1999, 2005
+ * Copyright (C) 1999, 2005, 2010
  *	Thomas Sailer (t.sailer@alumni.ethz.ch)
  *
  * This program is free software; you can redistribute it and/or modify
···
 	{ USB_DEVICE(0x0557, 0x2001) },
 	{ USB_DEVICE(0x0729, 0x1284) },
 	{ USB_DEVICE(0x1293, 0x0002) },
+	{ USB_DEVICE(0x1293, 0x0002) },
+	{ USB_DEVICE(0x050d, 0x0002) },
 	{ }						/* Terminating entry */
 };
 
+1 drivers/usb/serial/ftdi_sio.c
···
 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
 	{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ },					/* Optional parameter entry */
+5 drivers/usb/serial/ftdi_sio_ids.h
···
 #define MJSG_HD_RADIO_PID	0x937C
 
 /*
+ * D.O.Tec products (http://www.directout.eu)
+ */
+#define FTDI_DOTEC_PID 0x9868
+
+/*
  * Xverve Signalyzer tools (http://www.signalyzer.com/)
  */
 #define XVERVE_SIGNALYZER_ST_PID	0xBCA0
+7 drivers/usb/storage/unusual_devs.h
···
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_MAX_SECTORS_64),
 
+/* Reported by Vitaly Kuznetsov <vitty@altlinux.ru> */
+UNUSUAL_DEV(  0x04e8, 0x5122, 0x0000, 0x9999,
+		"Samsung",
+		"YP-CP3",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG),
+
 /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
  * Device uses standards-violating 32-byte Bulk Command Block Wrappers and
  * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
+1 drivers/video/backlight/cr_bllcd.c
+1 -1 drivers/video/fbmem.c
···
 	if (gen->base == hw->base)
 		return true;
 	/* is the generic aperture base inside the hw base->hw base+size */
-	if (gen->base > hw->base && gen->base <= hw->base + hw->size)
+	if (gen->base > hw->base && gen->base < hw->base + hw->size)
 		return true;
 	return false;
 }
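
The fix makes the range check half-open: hw->base + hw->size is the first byte past the aperture, so a generic framebuffer starting exactly there is adjacent, not contained. A small standalone check of the corrected predicate, with hypothetical addresses for a 16 MiB aperture:

    #include <stdio.h>
    #include <stdbool.h>

    /* corrected containment test: [base, base + size) is half-open */
    static bool base_inside(unsigned long gen_base,
                            unsigned long hw_base, unsigned long hw_size)
    {
            return gen_base > hw_base && gen_base < hw_base + hw_size;
    }

    int main(void)
    {
            printf("%d\n", base_inside(0xd0800000UL, 0xd0000000UL, 0x1000000UL)); /* 1: inside   */
            printf("%d\n", base_inside(0xd1000000UL, 0xd0000000UL, 0x1000000UL)); /* 0: adjacent */
            return 0;
    }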
+3 -6 drivers/video/imxfb.c
···
 #define LCDC_SIZE	0x04
 #define SIZE_XMAX(x)	((((x) >> 4) & 0x3f) << 20)
 
-#ifdef CONFIG_ARCH_MX1
-#define SIZE_YMAX(y)	((y) & 0x1ff)
-#else
-#define SIZE_YMAX(y)	((y) & 0x3ff)
-#endif
+#define YMAX_MASK       (cpu_is_mx1() ? 0x1ff : 0x3ff)
+#define SIZE_YMAX(y)	((y) & YMAX_MASK)
 
 #define LCDC_VPW	0x08
 #define VPW_VPW(x)	((x) & 0x3ff)
···
 	if (var->right_margin > 255)
 		printk(KERN_ERR "%s: invalid right_margin %d\n",
 			info->fix.id, var->right_margin);
-	if (var->yres < 1 || var->yres > 511)
+	if (var->yres < 1 || var->yres > YMAX_MASK)
 		printk(KERN_ERR "%s: invalid yres %d\n",
 			info->fix.id, var->yres);
 	if (var->vsync_len > 100)
+2 -2 drivers/video/omap/Kconfig
···
 config FB_OMAP
 	tristate "OMAP frame buffer support (EXPERIMENTAL)"
-	depends on FB && ARCH_OMAP && (OMAP2_DSS = "n")
-
+	depends on FB && (OMAP2_DSS = "n")
+	depends on ARCH_OMAP1 || ARCH_OMAP2 || ARCH_OMAP3
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
+2 -2 drivers/video/omap2/vram.c
···
 	if (!size)
 		return;
 
-	size = PAGE_ALIGN(size);
+	size = ALIGN(size, SZ_2M);
 
 	if (paddr) {
 		if (paddr & ~PAGE_MASK) {
···
 			return;
 		}
 	} else {
-		paddr = memblock_alloc(size, PAGE_SIZE);
+		paddr = memblock_alloc(size, SZ_2M);
 	}
 
 	memblock_free(paddr, size);
+13 -3 drivers/video/sh_mobile_hdmi.c
···
 		found_rate_error = rate_error;
 	}
 
+	hdmi->var.width = hdmi->monspec.max_x * 10;
+	hdmi->var.height = hdmi->monspec.max_y * 10;
+
 	/*
 	 * TODO 1: if no ->info is present, postpone running the config until
 	 * after ->info first gets registered.
···
 	dev_dbg(info->dev, "Old %ux%u, new %ux%u\n",
 		mode1.xres, mode1.yres, mode2.xres, mode2.yres);
 
-	if (fb_mode_is_equal(&mode1, &mode2))
+	if (fb_mode_is_equal(&mode1, &mode2)) {
+		/* It can be a different monitor with an equal video-mode */
+		old_var->width = new_var->width;
+		old_var->height = new_var->height;
 		return false;
+	}
 
 	dev_dbg(info->dev, "Switching %u -> %u lines\n",
 		mode1.yres, mode2.yres);
···
 		 * on, if we run a resume here, the logo disappears
 		 */
 		if (lock_fb_info(hdmi->info)) {
-			sh_hdmi_display_on(hdmi, hdmi->info);
-			unlock_fb_info(hdmi->info);
+			struct fb_info *info = hdmi->info;
+			info->var.width = hdmi->var.width;
+			info->var.height = hdmi->var.height;
+			sh_hdmi_display_on(hdmi, info);
+			unlock_fb_info(info);
 		}
 	} else {
 		/* New monitor or have to wake up */
+11 -19 drivers/video/sh_mobile_lcdcfb.c
···
 };
 #define NR_SHARED_REGS ARRAY_SIZE(lcdc_shared_regs)
 
-#define DEFAULT_XRES 1280
-#define DEFAULT_YRES 1024
+#define MAX_XRES 1920
+#define MAX_YRES 1080
 
 static unsigned long lcdc_offs_mainlcd[NR_CH_REGS] = {
 	[LDDCKPAT1R] = 0x400,
···
 {
 	struct sh_mobile_lcdc_chan *ch = info->par;
 
-	if (var->xres < 160 || var->xres > 1920 ||
-	    var->yres < 120 || var->yres > 1080 ||
-	    var->left_margin < 32 || var->left_margin > 320 ||
-	    var->right_margin < 12 || var->right_margin > 240 ||
-	    var->upper_margin < 12 || var->upper_margin > 120 ||
-	    var->lower_margin < 1 || var->lower_margin > 64 ||
-	    var->hsync_len < 32 || var->hsync_len > 240 ||
-	    var->vsync_len < 2 || var->vsync_len > 64 ||
-	    var->pixclock < 6000 || var->pixclock > 40000 ||
+	if (var->xres > MAX_XRES || var->yres > MAX_YRES ||
 	    var->xres * var->yres * (ch->cfg.bpp / 8) * 2 > info->fix.smem_len) {
-		dev_warn(info->dev, "Invalid info: %u %u %u %u %u %u %u %u %u!\n",
-			 var->xres, var->yres,
-			 var->left_margin, var->right_margin,
-			 var->upper_margin, var->lower_margin,
-			 var->hsync_len, var->vsync_len,
-			 var->pixclock);
+		dev_warn(info->dev, "Invalid info: %u-%u-%u-%u x %u-%u-%u-%u @ %ukHz!\n",
+			 var->left_margin, var->xres, var->right_margin, var->hsync_len,
+			 var->upper_margin, var->yres, var->lower_margin, var->vsync_len,
+			 PICOS2KHZ(var->pixclock));
 		return -EINVAL;
 	}
 	return 0;
···
 	}
 
 	if (!mode)
-		max_size = DEFAULT_XRES * DEFAULT_YRES;
+		max_size = MAX_XRES * MAX_YRES;
 	else if (max_cfg)
 		dev_dbg(&pdev->dev, "Found largest videomode %ux%u\n",
 			max_cfg->xres, max_cfg->yres);
···
 		mode = &default_720p;
 		num_cfg = 1;
 	} else {
-		num_cfg = ch->cfg.num_cfg;
+		num_cfg = cfg->num_cfg;
 	}
 
 	fb_videomode_to_modelist(mode, num_cfg, &info->modelist);
 
 	fb_videomode_to_var(var, mode);
+	var->width = cfg->lcd_size_cfg.width;
+	var->height = cfg->lcd_size_cfg.height;
 	/* Default Y virtual resolution is 2x panel size */
 	var->yres_virtual = var->yres * 2;
 	var->activate = FB_ACTIVATE_NOW;
+1 -1 drivers/watchdog/rdc321x_wdt.c
+1 -1 fs/btrfs/export.c
···
 static struct dentry *btrfs_get_parent(struct dentry *child)
 {
 	struct inode *dir = child->d_inode;
-	static struct dentry *dentry;
+	struct dentry *dentry;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
+2 -1 fs/ceph/dir.c
···
 	if (dentry->d_fsdata)
 		return 0;
 
-	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
+	if (dentry->d_parent == NULL ||   /* nfs fh_to_dentry */
+	    ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
 		dentry->d_op = &ceph_dentry_ops;
 	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
 		dentry->d_op = &ceph_snapdir_dentry_ops;
+23 -16 fs/ceph/file.c
···
 static int striped_read(struct inode *inode,
 			u64 off, u64 len,
 			struct page **pages, int num_pages,
-			int *checkeof, bool align_to_pages)
+			int *checkeof, bool align_to_pages,
+			unsigned long buf_align)
 {
 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	struct ceph_inode_info *ci = ceph_inode(inode);
···
 
 more:
 	if (align_to_pages)
-		page_align = (pos - io_align) & ~PAGE_MASK;
+		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
 	else
 		page_align = pos & ~PAGE_MASK;
 	this_len = left;
···
 	struct inode *inode = file->f_dentry->d_inode;
 	struct page **pages;
 	u64 off = *poff;
-	int num_pages = calc_pages_for(off, len);
-	int ret;
+	int num_pages, ret;
 
 	dout("sync_read on file %p %llu~%u %s\n", file, off, len,
 	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
 
-	if (file->f_flags & O_DIRECT)
-		pages = ceph_get_direct_page_vector(data, num_pages);
-	else
+	if (file->f_flags & O_DIRECT) {
+		num_pages = calc_pages_for((unsigned long)data, len);
+		pages = ceph_get_direct_page_vector(data, num_pages, true);
+	} else {
+		num_pages = calc_pages_for(off, len);
 		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
+	}
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
···
 		goto done;
 
 	ret = striped_read(inode, off, len, pages, num_pages, checkeof,
-			   file->f_flags & O_DIRECT);
+			   file->f_flags & O_DIRECT,
+			   (unsigned long)data & ~PAGE_MASK);
 
 	if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
 		ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
···
 
 done:
 	if (file->f_flags & O_DIRECT)
-		ceph_put_page_vector(pages, num_pages);
+		ceph_put_page_vector(pages, num_pages, true);
 	else
 		ceph_release_page_vector(pages, num_pages);
 	dout("sync_read result %d\n", ret);
···
 	int do_sync = 0;
 	int check_caps = 0;
 	int page_align, io_align;
+	unsigned long buf_align;
 	int ret;
 	struct timespec mtime = CURRENT_TIME;
 
···
 	pos = *offset;
 
 	io_align = pos & ~PAGE_MASK;
+	buf_align = (unsigned long)data & ~PAGE_MASK;
 
 	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
 	if (ret < 0)
···
 	 */
 more:
 	len = left;
-	if (file->f_flags & O_DIRECT)
+	if (file->f_flags & O_DIRECT) {
 		/* write from beginning of first page, regardless of
 		   io alignment */
-		page_align = (pos - io_align) & ~PAGE_MASK;
-	else
+		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
+		num_pages = calc_pages_for((unsigned long)data, len);
+	} else {
 		page_align = pos & ~PAGE_MASK;
+		num_pages = calc_pages_for(pos, len);
+	}
 	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
 				    ceph_vino(inode), pos, &len,
 				    CEPH_OSD_OP_WRITE, flags,
···
 	if (!req)
 		return -ENOMEM;
 
-	num_pages = calc_pages_for(pos, len);
-
 	if (file->f_flags & O_DIRECT) {
-		pages = ceph_get_direct_page_vector(data, num_pages);
+		pages = ceph_get_direct_page_vector(data, num_pages, false);
 		if (IS_ERR(pages)) {
 			ret = PTR_ERR(pages);
 			goto out;
···
 	}
 
 	if (file->f_flags & O_DIRECT)
-		ceph_put_page_vector(pages, num_pages);
+		ceph_put_page_vector(pages, num_pages, false);
 	else if (file->f_flags & O_SYNC)
 		ceph_release_page_vector(pages, num_pages);
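
With O_DIRECT the OSD request must be told where the data sits inside the first pinned page, and that now depends on the user buffer's own alignment, not just the file position. A standalone recomputation with made-up numbers (4 KiB pages; note the kernel's PAGE_MASK is the complement of the low-bits mask used here):

    #include <stdio.h>

    #define PAGE_LOW_BITS 0xFFFUL   /* low bits of a 4 KiB page */

    int main(void)
    {
            unsigned long start = 0x1200;                       /* file offset of the write */
            unsigned long io_align = start & PAGE_LOW_BITS;     /* = 0x200 */
            unsigned long buf_align = 0x80;                     /* user buffer % 4096 */
            unsigned long pos = 0x3200;                         /* current chunk, two pages in */

            /* offset of this chunk's data within its first pinned page */
            unsigned long page_align = (pos - io_align + buf_align) & PAGE_LOW_BITS;

            printf("page_align = 0x%lx\n", page_align);         /* prints 0x80 */
            return 0;
    }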
+5 fs/ext4/resize.c
···
 			       GFP_NOFS);
 	if (err)
 		goto exit_bh;
+	for (i = 0, bit = gdblocks + 1; i < reserved_gdb; i++, bit++)
+		ext4_set_bit(bit, bh->b_data);
 
 	ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap,
 		   input->block_bitmap - start);
···
 	err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
 	if (err)
 		goto exit_bh;
+	for (i = 0, bit = input->inode_table - start;
+	     i < sbi->s_itb_per_group; i++, bit++)
+		ext4_set_bit(bit, bh->b_data);
 
 	if ((err = extend_or_restart_transaction(handle, 2, bh)))
 		goto exit_bh;
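
Each added loop marks a contiguous run of group-relative blocks (first the reserved GDT blocks, then the inode-table blocks) as in use in the new group's block bitmap. The same idiom in miniature, on an ordinary byte array; set_bit_le() here is a stand-in defined locally, not the ext4 helper:

    #include <stdio.h>

    /* little-endian bitmap set, standing in for ext4_set_bit() */
    static void set_bit_le(unsigned long nr, unsigned char *addr)
    {
            addr[nr >> 3] |= 1 << (nr & 7);
    }

    int main(void)
    {
            unsigned char bitmap[8] = { 0 };
            unsigned long gdblocks = 2, reserved_gdb = 3;   /* toy layout */
            unsigned long i, bit;

            /* mark the reserved GDT blocks: bits 3..5 here */
            for (i = 0, bit = gdblocks + 1; i < reserved_gdb; i++, bit++)
                    set_bit_le(bit, bitmap);

            printf("bitmap[0] = 0x%02x\n", bitmap[0]);      /* 0x38: bits 3, 4, 5 */
            return 0;
    }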
+1 -1 fs/logfs/journal.c
···
 	super->s_journal_seg[i] = segno;
 	super->s_journal_ec[i] = ec;
 	logfs_set_segment_reserved(sb, segno);
-	err = btree_insert32(head, segno, (void *)1, GFP_KERNEL);
+	err = btree_insert32(head, segno, (void *)1, GFP_NOFS);
 	BUG_ON(err); /* mempool should prevent this */
 	err = logfs_erase_segment(sb, segno, 1);
 	BUG_ON(err); /* FIXME: remount-ro would be nicer */
+3 fs/logfs/readwrite.c
+5 -2 fs/ocfs2/aops.c
···
 	/* this io's submitter should not have unlocked this before we could */
 	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
 
+	if (ocfs2_iocb_is_sem_locked(iocb)) {
+		up_read(&inode->i_alloc_sem);
+		ocfs2_iocb_clear_sem_locked(iocb);
+	}
+
 	ocfs2_iocb_clear_rw_locked(iocb);
 
 	level = ocfs2_iocb_rw_locked_level(iocb);
-	if (!level)
-		up_read(&inode->i_alloc_sem);
 	ocfs2_rw_unlock(inode, level);
 
 	if (is_async)
+21 -2 fs/ocfs2/aops.h
···
 	else
 		clear_bit(1, (unsigned long *)&iocb->private);
 }
+
+/*
+ * Using a named enum representing lock types in terms of #N bit stored in
+ * iocb->private, which is going to be used for communication bewteen
+ * ocfs2_dio_end_io() and ocfs2_file_aio_write/read().
+ */
+enum ocfs2_iocb_lock_bits {
+	OCFS2_IOCB_RW_LOCK = 0,
+	OCFS2_IOCB_RW_LOCK_LEVEL,
+	OCFS2_IOCB_SEM,
+	OCFS2_IOCB_NUM_LOCKS
+};
+
 #define ocfs2_iocb_clear_rw_locked(iocb) \
-	clear_bit(0, (unsigned long *)&iocb->private)
+	clear_bit(OCFS2_IOCB_RW_LOCK, (unsigned long *)&iocb->private)
 #define ocfs2_iocb_rw_locked_level(iocb) \
-	test_bit(1, (unsigned long *)&iocb->private)
+	test_bit(OCFS2_IOCB_RW_LOCK_LEVEL, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_set_sem_locked(iocb) \
+	set_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_clear_sem_locked(iocb) \
+	clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_is_sem_locked(iocb) \
+	test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
 #endif /* OCFS2_FILE_H */
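
The iocb->private field doubles as a small bitmap: the submitter sets one bit per lock it holds, and the async completion handler tests and clears them to know what to release. A userspace-sized sketch of the idiom (plain integer ops rather than the kernel's atomic bitops, so purely illustrative):

    #include <stdio.h>

    enum lock_bits { RW_LOCK = 0, RW_LOCK_LEVEL, SEM, NUM_LOCKS };

    int main(void)
    {
            unsigned long flags = 0;             /* stands in for iocb->private */

            flags |= 1UL << SEM;                 /* submitter: i_alloc_sem taken */
            flags |= 1UL << RW_LOCK;             /* submitter: rw lock taken */

            if (flags & (1UL << SEM)) {          /* completion: release and clear */
                    /* up_read(&inode->i_alloc_sem) would go here in the kernel */
                    flags &= ~(1UL << SEM);
            }
            printf("flags = %#lx\n", flags);     /* SEM bit gone, RW_LOCK still set */
            return 0;
    }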
+2 -1 fs/ocfs2/cluster/masklog.c
···
 	define_mask(QUOTA),
 	define_mask(REFCOUNT),
 	define_mask(BASTS),
+	define_mask(RESERVATIONS),
+	define_mask(CLUSTER),
 	define_mask(ERROR),
 	define_mask(NOTICE),
 	define_mask(KTHREAD),
-	define_mask(RESERVATIONS),
 };
 
 static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
+8 -7 fs/ocfs2/cluster/masklog.h
···
 #include <linux/sched.h>
 
 /* bits that are frequently given and infrequently matched in the low word */
-/* NOTE: If you add a flag, you need to also update mlog.c! */
+/* NOTE: If you add a flag, you need to also update masklog.c! */
 #define ML_ENTRY	0x0000000000000001ULL /* func call entry */
 #define ML_EXIT		0x0000000000000002ULL /* func call exit */
 #define ML_TCP		0x0000000000000004ULL /* net cluster/tcp.c */
···
 #define ML_XATTR	0x0000000020000000ULL /* ocfs2 extended attributes */
 #define ML_QUOTA	0x0000000040000000ULL /* ocfs2 quota operations */
 #define ML_REFCOUNT	0x0000000080000000ULL /* refcount tree operations */
-#define ML_BASTS	0x0000001000000000ULL /* dlmglue asts and basts */
+#define ML_BASTS	0x0000000100000000ULL /* dlmglue asts and basts */
+#define ML_RESERVATIONS	0x0000000200000000ULL /* ocfs2 alloc reservations */
+#define ML_CLUSTER	0x0000000400000000ULL /* cluster stack */
+
 /* bits that are infrequently given and frequently matched in the high word */
-#define ML_ERROR	0x0000000100000000ULL /* sent to KERN_ERR */
-#define ML_NOTICE	0x0000000200000000ULL /* setn to KERN_NOTICE */
-#define ML_KTHREAD	0x0000000400000000ULL /* kernel thread activity */
-#define ML_RESERVATIONS	0x0000000800000000ULL /* ocfs2 alloc reservations */
-#define ML_CLUSTER	0x0000001000000000ULL /* cluster stack */
+#define ML_ERROR	0x1000000000000000ULL /* sent to KERN_ERR */
+#define ML_NOTICE	0x2000000000000000ULL /* setn to KERN_NOTICE */
+#define ML_KTHREAD	0x4000000000000000ULL /* kernel thread activity */
 
 #define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
 #define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT)
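
The renumbering keeps the ordinary flag bits contiguous (ML_BASTS now follows ML_REFCOUNT at bit 32, with RESERVATIONS and CLUSTER right after) and moves the frequently-matched ERROR/NOTICE/KTHREAD bits to the very top of the 64-bit word, matching the reordered define_mask() table. A standalone check that the relocated masks remain disjoint single bits:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            const uint64_t basts        = 0x0000000100000000ULL;
            const uint64_t reservations = 0x0000000200000000ULL;
            const uint64_t cluster      = 0x0000000400000000ULL;
            const uint64_t error        = 0x1000000000000000ULL;

            /* each flag is a distinct bit: pairwise AND must be zero */
            printf("%d\n", (basts & reservations) == 0);                      /* 1 */
            printf("%d\n", ((basts | reservations | cluster) & error) == 0);  /* 1 */
            return 0;
    }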
+4 fs/ocfs2/dir.c
···
 
 	di->i_dx_root = cpu_to_le64(dr_blkno);
 
+	spin_lock(&OCFS2_I(dir)->ip_lock);
 	OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL;
 	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
+	spin_unlock(&OCFS2_I(dir)->ip_lock);
 
 	ocfs2_journal_dirty(handle, di_bh);
 
···
 		goto out_commit;
 	}
 
+	spin_lock(&OCFS2_I(dir)->ip_lock);
 	OCFS2_I(dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL;
 	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
+	spin_unlock(&OCFS2_I(dir)->ip_lock);
 	di->i_dx_root = cpu_to_le64(0ULL);
 
 	ocfs2_journal_dirty(handle, di_bh);
+27 -13 fs/ocfs2/dlm/dlmmaster.c
···
  */
 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
 				      struct dlm_lock_resource *res,
-				      int *numlocks)
+				      int *numlocks,
+				      int *hasrefs)
 {
 	int ret;
 	int i;
···
 	struct dlm_lock *lock;
 
 	assert_spin_locked(&res->spinlock);
+
+	*numlocks = 0;
+	*hasrefs = 0;
 
 	ret = -EINVAL;
 	if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
···
 	}
 
 	*numlocks = count;
-	mlog(0, "migrateable lockres having %d locks\n", *numlocks);
+
+	count = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+	if (count < O2NM_MAX_NODES)
+		*hasrefs = 1;
+
+	mlog(0, "%s: res %.*s, Migrateable, locks %d, refs %d\n", dlm->name,
+	     res->lockname.len, res->lockname.name, *numlocks, *hasrefs);
 
 leave:
 	return ret;
···
 	const char *name;
 	unsigned int namelen;
 	int mle_added = 0;
-	int numlocks;
+	int numlocks, hasrefs;
 	int wake = 0;
 
 	if (!dlm_grab(dlm))
···
 	name = res->lockname.name;
 	namelen = res->lockname.len;
 
-	mlog(0, "migrating %.*s to %u\n", namelen, name, target);
+	mlog(0, "%s: Migrating %.*s to %u\n", dlm->name, namelen, name, target);
 
 	/*
 	 * ensure this lockres is a proper candidate for migration
 	 */
 	spin_lock(&res->spinlock);
-	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
+	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs);
 	if (ret < 0) {
 		spin_unlock(&res->spinlock);
 		goto leave;
···
 	spin_unlock(&res->spinlock);
 
 	/* no work to do */
-	if (numlocks == 0) {
-		mlog(0, "no locks were found on this lockres! done!\n");
+	if (numlocks == 0 && !hasrefs)
 		goto leave;
-	}
 
 	/*
 	 * preallocate up front
···
 	 * find a node to migrate the lockres to
 	 */
 
-	mlog(0, "picking a migration node\n");
 	spin_lock(&dlm->spinlock);
 	/* pick a new node */
 	if (!test_bit(target, dlm->domain_map) ||
 	    target >= O2NM_MAX_NODES) {
 		target = dlm_pick_migration_target(dlm, res);
 	}
-	mlog(0, "node %u chosen for migration\n", target);
+	mlog(0, "%s: res %.*s, Node %u chosen for migration\n", dlm->name,
+	     namelen, name, target);
 
 	if (target >= O2NM_MAX_NODES ||
 	    !test_bit(target, dlm->domain_map)) {
···
 {
 	int ret;
 	int lock_dropped = 0;
-	int numlocks;
+	int numlocks, hasrefs;
 
 	spin_lock(&res->spinlock);
 	if (res->owner != dlm->node_num) {
···
 	}
 
 	/* No need to migrate a lockres having no locks */
-	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
-	if (ret >= 0 && numlocks == 0) {
+	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs);
+	if (ret >= 0 && numlocks == 0 && !hasrefs) {
 		spin_unlock(&res->spinlock);
 		goto leave;
 	}
···
 			}
 		}
 		queue++;
+	}
+
+	nodenum = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+	if (nodenum < O2NM_MAX_NODES) {
+		spin_unlock(&res->spinlock);
+		return nodenum;
 	}
 	spin_unlock(&res->spinlock);
 	mlog(0, "have not found a suitable target yet! checking domain map\n");
+13
-2
fs/ocfs2/file.c
···
2241
2241
2242
2242
mutex_lock(&inode->i_mutex);
2243
2243
2244
+
ocfs2_iocb_clear_sem_locked(iocb);
2245
+
2244
2246
relock:
2245
2247
/* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
2246
2248
if (direct_io) {
2247
2249
down_read(&inode->i_alloc_sem);
2248
2250
have_alloc_sem = 1;
2251
+
/* communicate with ocfs2_dio_end_io */
2252
+
ocfs2_iocb_set_sem_locked(iocb);
2249
2253
}
2250
2254
2251
2255
/*
···
2386
2382
ocfs2_rw_unlock(inode, rw_level);
2387
2383
2388
2384
out_sems:
2389
-
if (have_alloc_sem)
2385
+
if (have_alloc_sem) {
2390
2386
up_read(&inode->i_alloc_sem);
2387
+
ocfs2_iocb_clear_sem_locked(iocb);
2388
+
}
2391
2389
2392
2390
mutex_unlock(&inode->i_mutex);
2393
2391
···
2533
2527
goto bail;
2534
2528
}
2535
2529
2530
+
ocfs2_iocb_clear_sem_locked(iocb);
2531
+
2536
2532
/*
2537
2533
* buffered reads protect themselves in ->readpage(). O_DIRECT reads
2538
2534
* need locks to protect pending reads from racing with truncate.
···
2542
2534
if (filp->f_flags & O_DIRECT) {
2543
2535
down_read(&inode->i_alloc_sem);
2544
2536
have_alloc_sem = 1;
2537
+
ocfs2_iocb_set_sem_locked(iocb);
2545
2538
2546
2539
ret = ocfs2_rw_lock(inode, 0);
2547
2540
if (ret < 0) {
···
2584
2575
}
2585
2576
2586
2577
bail:
2587
-
if (have_alloc_sem)
2578
+
if (have_alloc_sem) {
2588
2579
up_read(&inode->i_alloc_sem);
2580
+
ocfs2_iocb_clear_sem_locked(iocb);
2581
+
}
2589
2582
if (rw_level != -1)
2590
2583
ocfs2_rw_unlock(inode, rw_level);
2591
2584
mlog_exit(ret);
+1
-1
fs/ocfs2/ocfs2_fs.h
···
350
350
#define OCFS2_LAST_LOCAL_SYSTEM_INODE LOCAL_GROUP_QUOTA_SYSTEM_INODE
351
351
NUM_SYSTEM_INODES
352
352
};
353
-
#define NUM_GLOBAL_SYSTEM_INODES OCFS2_LAST_GLOBAL_SYSTEM_INODE
353
+
#define NUM_GLOBAL_SYSTEM_INODES OCFS2_FIRST_LOCAL_SYSTEM_INODE
354
354
#define NUM_LOCAL_SYSTEM_INODES \
355
355
(NUM_SYSTEM_INODES - OCFS2_FIRST_LOCAL_SYSTEM_INODE)
356
356
+7
-3
include/linux/blkdev.h
···
250
250
251
251
unsigned char misaligned;
252
252
unsigned char discard_misaligned;
253
-
unsigned char no_cluster;
253
+
unsigned char cluster;
254
254
signed char discard_zeroes_data;
255
255
};
256
256
···
380
380
#endif
381
381
};
382
382
383
-
#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
384
383
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
385
384
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
386
385
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
···
402
403
#define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */
403
404
404
405
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
405
-
(1 << QUEUE_FLAG_CLUSTER) | \
406
406
(1 << QUEUE_FLAG_STACKABLE) | \
407
407
(1 << QUEUE_FLAG_SAME_COMP) | \
408
408
(1 << QUEUE_FLAG_ADD_RANDOM))
···
507
509
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
508
510
509
511
#define rq_data_dir(rq) ((rq)->cmd_flags & 1)
512
+
513
+
static inline unsigned int blk_queue_cluster(struct request_queue *q)
514
+
{
515
+
return q->limits.cluster;
516
+
}
510
517
511
518
/*
512
519
* We regard a request as sync, if either a read or a sync write
···
808
805
extern void blk_cleanup_queue(struct request_queue *);
809
806
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
810
807
extern void blk_queue_bounce_limit(struct request_queue *, u64);
808
+
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
811
809
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
812
810
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
813
811
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
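With QUEUE_FLAG_CLUSTER gone, clustering is a queue limit rather than a queue flag, so stacking drivers can propagate it through the usual limit-stacking path instead of special-casing a flag. A sketch of a caller, assuming a tree with this patch applied (can_merge_segments() is hypothetical, not a kernel symbol):

#include <linux/blkdev.h>

/* Hypothetical check: may two adjacent bio segments be merged? */
static int can_merge_segments(struct request_queue *q,
			      struct bio_vec *prev, struct bio_vec *next)
{
	/* Was: test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags) */
	if (!blk_queue_cluster(q))
		return 0;

	/* Real merging code also checks physical contiguity and the
	 * segment boundary mask; elided here. */
	return 1;
}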
+2
include/linux/bootmem.h
···
105
105
106
106
#define alloc_bootmem(x) \
107
107
__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
108
+
#define alloc_bootmem_align(x, align) \
109
+
__alloc_bootmem(x, align, __pa(MAX_DMA_ADDRESS))
108
110
#define alloc_bootmem_nopanic(x) \
109
111
__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
110
112
#define alloc_bootmem_pages(x) \
+4
-2
include/linux/ceph/libceph.h
···
227
227
extern void ceph_release_page_vector(struct page **pages, int num_pages);
228
228
229
229
extern struct page **ceph_get_direct_page_vector(const char __user *data,
230
-
int num_pages);
231
-
extern void ceph_put_page_vector(struct page **pages, int num_pages);
230
+
int num_pages,
231
+
bool write_page);
232
+
extern void ceph_put_page_vector(struct page **pages, int num_pages,
233
+
bool dirty);
232
234
extern void ceph_release_page_vector(struct page **pages, int num_pages);
233
235
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
234
236
extern int ceph_copy_user_to_page_vector(struct page **pages,
+19
-1
include/linux/cnt32_to_63.h
···
61
61
*
62
62
* 2) this code must not be preempted for a duration longer than the
63
63
* 32-bit counter half period minus the longest period between two
64
-
* calls to this code.
64
+
* calls to this code;
65
65
*
66
66
* Those requirements ensure proper update to the state bit in memory.
67
67
* This is usually not a problem in practice, but if it is then a kernel
68
68
* timer should be scheduled to manage for this code to be executed often
69
69
* enough.
70
+
*
71
+
* And finally:
72
+
*
73
+
* 3) the cnt_lo argument must be seen as a globally incrementing value,
74
+
* meaning that it should be a direct reference to the counter data which
75
+
* can be evaluated according to a specific ordering within the macro,
76
+
* and not the result of a previous evaluation stored in a variable.
77
+
*
78
+
* For example, this is wrong:
79
+
*
80
+
* u32 partial = get_hw_count();
81
+
* u64 full = cnt32_to_63(partial);
82
+
* return full;
83
+
*
84
+
* This is fine:
85
+
*
86
+
* u64 full = cnt32_to_63(get_hw_count());
87
+
* return full;
70
88
*
71
89
* Note that the top bit (bit 63) in the returned value should be considered
72
90
* as garbage. It is not cleared here because callers are likely to use a
+1
-1
include/linux/ioport.h
···
112
112
/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
113
113
extern struct resource ioport_resource;
114
114
extern struct resource iomem_resource;
115
-
extern int resource_alloc_from_bottom;
116
115
117
116
extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
118
117
extern int request_resource(struct resource *root, struct resource *new);
···
123
124
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
124
125
extern int insert_resource(struct resource *parent, struct resource *new);
125
126
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
127
+
extern void arch_remove_reservations(struct resource *avail);
126
128
extern int allocate_resource(struct resource *root, struct resource *new,
127
129
resource_size_t size, resource_size_t min,
128
130
resource_size_t max, resource_size_t align,
+34
-9
include/linux/kthread.h
···
81
81
#define DEFINE_KTHREAD_WORK(work, fn) \
82
82
struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
83
83
84
-
static inline void init_kthread_worker(struct kthread_worker *worker)
85
-
{
86
-
*worker = (struct kthread_worker)KTHREAD_WORKER_INIT(*worker);
87
-
}
84
+
/*
85
+
* kthread_worker.lock and kthread_work.done need their own lockdep class
86
+
* keys if they are defined on stack with lockdep enabled. Use the
87
+
* following macros when defining them on stack.
88
+
*/
89
+
#ifdef CONFIG_LOCKDEP
90
+
# define KTHREAD_WORKER_INIT_ONSTACK(worker) \
91
+
({ init_kthread_worker(&worker); worker; })
92
+
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \
93
+
struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
94
+
# define KTHREAD_WORK_INIT_ONSTACK(work, fn) \
95
+
({ init_kthread_work((&work), fn); work; })
96
+
# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) \
97
+
struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn)
98
+
#else
99
+
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
100
+
# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn)
101
+
#endif
88
102
89
-
static inline void init_kthread_work(struct kthread_work *work,
90
-
kthread_work_func_t fn)
91
-
{
92
-
*work = (struct kthread_work)KTHREAD_WORK_INIT(*work, fn);
93
-
}
103
+
extern void __init_kthread_worker(struct kthread_worker *worker,
104
+
const char *name, struct lock_class_key *key);
105
+
106
+
#define init_kthread_worker(worker) \
107
+
do { \
108
+
static struct lock_class_key __key; \
109
+
__init_kthread_worker((worker), "("#worker")->lock", &__key); \
110
+
} while (0)
111
+
112
+
#define init_kthread_work(work, fn) \
113
+
do { \
114
+
memset((work), 0, sizeof(struct kthread_work)); \
115
+
INIT_LIST_HEAD(&(work)->node); \
116
+
(work)->func = (fn); \
117
+
init_waitqueue_head(&(work)->done); \
118
+
} while (0)
94
119
95
120
int kthread_worker_fn(void *worker_ptr);
96
121
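The switch from inline functions to macros is what lets each on-stack worker get its own lockdep class key; a single static key shared by every call site would produce false-positive lockdep reports. A sketch of the intended on-stack usage, assuming a 2.6.37-era tree (my_work_fn() and start_worker() are hypothetical):

#include <linux/err.h>
#include <linux/kthread.h>

static void my_work_fn(struct kthread_work *work)
{
	/* runs in the worker thread */
}

static int start_worker(void)
{
	DEFINE_KTHREAD_WORKER_ONSTACK(worker);	/* private lockdep key */
	DEFINE_KTHREAD_WORK_ONSTACK(work, my_work_fn);
	struct task_struct *task;

	task = kthread_run(kthread_worker_fn, &worker, "my-worker");
	if (IS_ERR(task))
		return PTR_ERR(task);

	queue_kthread_work(&worker, &work);
	flush_kthread_work(&work);	/* wait for my_work_fn to finish */
	return kthread_stop(task);
}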
+1
-1
include/linux/netlink.h
···
70
70
Check NLM_F_EXCL
71
71
*/
72
72
73
-
#define NLMSG_ALIGNTO 4
73
+
#define NLMSG_ALIGNTO 4U
74
74
#define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) )
75
75
#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
76
76
#define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(NLMSG_HDRLEN))
+1
include/linux/perf_event.h
+1
-1
include/linux/sched.h
+2
-1
include/linux/taskstats.h
···
33
33
*/
34
34
35
35
36
-
#define TASKSTATS_VERSION 7
36
+
#define TASKSTATS_VERSION 8
37
37
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
38
38
* in linux/sched.h */
39
39
···
188
188
TASKSTATS_TYPE_STATS, /* taskstats structure */
189
189
TASKSTATS_TYPE_AGGR_PID, /* contains pid + stats */
190
190
TASKSTATS_TYPE_AGGR_TGID, /* contains tgid + stats */
191
+
TASKSTATS_TYPE_NULL, /* contains nothing */
191
192
__TASKSTATS_TYPE_MAX,
192
193
};
193
194
+3
-3
include/linux/unaligned/packed_struct.h
···
3
3
4
4
#include <linux/kernel.h>
5
5
6
-
struct __una_u16 { u16 x __attribute__((packed)); };
7
-
struct __una_u32 { u32 x __attribute__((packed)); };
8
-
struct __una_u64 { u64 x __attribute__((packed)); };
6
+
struct __una_u16 { u16 x; } __attribute__((packed));
7
+
struct __una_u32 { u32 x; } __attribute__((packed));
8
+
struct __una_u64 { u64 x; } __attribute__((packed));
9
9
10
10
static inline u16 __get_unaligned_cpu16(const void *p)
11
11
{
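The placement matters because the two forms are not equivalent: with the attribute on the member alone, some compilers keep the struct's natural alignment, so dereferencing a pointer cast from a misaligned address may still emit an aligned load and fault on strict-alignment machines. With the attribute on the struct, the whole type is 1-byte aligned. A freestanding illustration (userspace C, hypothetical names):

#include <stdint.h>
#include <stdio.h>

struct una_u16 { uint16_t x; } __attribute__((packed));

/* alignof(struct una_u16) is 1, so this load is valid at any address. */
static uint16_t load_u16(const void *p)
{
	return ((const struct una_u16 *)p)->x;
}

int main(void)
{
	unsigned char buf[3] = { 0x00, 0x34, 0x12 };

	/* buf + 1 is deliberately misaligned for a bare uint16_t */
	printf("0x%04x\n", load_u16(buf + 1));	/* 0x1234 on little-endian */
	return 0;
}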
+1
-1
include/media/saa7146.h
···
161
161
extern struct mutex saa7146_devices_lock;
162
162
int saa7146_register_extension(struct saa7146_extension*);
163
163
int saa7146_unregister_extension(struct saa7146_extension*);
164
-
struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc);
164
+
struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc);
165
165
int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt);
166
166
void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt);
167
167
int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int length );
-1
include/net/flow.h
+10
include/net/ip6_route.h
···
164
164
return rt->rt6i_flags & RTF_LOCAL;
165
165
}
166
166
167
+
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
168
+
169
+
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
170
+
{
171
+
struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
172
+
173
+
return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
174
+
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
175
+
}
176
+
167
177
#endif
168
178
#endif
+24
-4
include/net/mac80211.h
···
2024
2024
*
2025
2025
* This function may not be called in IRQ context. Calls to this function
2026
2026
* for a single hardware must be synchronized against each other. Calls
2027
-
* to this function and ieee80211_tx_status_irqsafe() may not be mixed
2028
-
* for a single hardware.
2027
+
* to this function, ieee80211_tx_status_ni() and ieee80211_tx_status_irqsafe()
2028
+
* may not be mixed for a single hardware.
2029
2029
*
2030
2030
* @hw: the hardware the frame was transmitted by
2031
2031
* @skb: the frame that was transmitted, owned by mac80211 after this call
···
2034
2034
struct sk_buff *skb);
2035
2035
2036
2036
/**
2037
+
* ieee80211_tx_status_ni - transmit status callback (in process context)
2038
+
*
2039
+
* Like ieee80211_tx_status() but can be called in process context.
2040
+
*
2041
+
* Calls to this function, ieee80211_tx_status() and
2042
+
* ieee80211_tx_status_irqsafe() may not be mixed
2043
+
* for a single hardware.
2044
+
*
2045
+
* @hw: the hardware the frame was transmitted by
2046
+
* @skb: the frame that was transmitted, owned by mac80211 after this call
2047
+
*/
2048
+
static inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw,
2049
+
struct sk_buff *skb)
2050
+
{
2051
+
local_bh_disable();
2052
+
ieee80211_tx_status(hw, skb);
2053
+
local_bh_enable();
2054
+
}
2055
+
2056
+
/**
2037
2057
* ieee80211_tx_status_irqsafe - IRQ-safe transmit status callback
2038
2058
*
2039
2059
* Like ieee80211_tx_status() but can be called in IRQ context
2040
2060
* (internally defers to a tasklet.)
2041
2061
*
2042
-
* Calls to this function and ieee80211_tx_status() may not be mixed for a
2043
-
* single hardware.
2062
+
* Calls to this function, ieee80211_tx_status() and
2063
+
* ieee80211_tx_status_ni() may not be mixed for a single hardware.
2044
2064
*
2045
2065
* @hw: the hardware the frame was transmitted by
2046
2066
* @skb: the frame that was transmitted, owned by mac80211 after this call
+3
-1
include/net/pkt_cls.h
···
323
323
static inline int tcf_valid_offset(const struct sk_buff *skb,
324
324
const unsigned char *ptr, const int len)
325
325
{
326
-
return unlikely((ptr + len) < skb_tail_pointer(skb) && ptr > skb->head);
326
+
return likely((ptr + len) <= skb_tail_pointer(skb) &&
327
+
ptr >= skb->head &&
328
+
(ptr <= (ptr + len)));
327
329
}
328
330
329
331
#ifdef CONFIG_NET_CLS_IND
+1
-5
include/net/sch_generic.h
···
610
610
{
611
611
struct sk_buff *n;
612
612
613
-
if ((action == TC_ACT_STOLEN || action == TC_ACT_QUEUED) &&
614
-
!skb_shared(skb))
615
-
n = skb_get(skb);
616
-
else
617
-
n = skb_clone(skb, gfp_mask);
613
+
n = skb_clone(skb, gfp_mask);
618
614
619
615
if (n) {
620
616
n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
+3
include/net/sock.h
···
754
754
void (*unhash)(struct sock *sk);
755
755
void (*rehash)(struct sock *sk);
756
756
int (*get_port)(struct sock *sk, unsigned short snum);
757
+
void (*clear_sk)(struct sock *sk, int size);
757
758
758
759
/* Keeping track of sockets in use */
759
760
#ifdef CONFIG_PROC_FS
···
852
851
sk->sk_prot->unhash(sk);
853
852
sk->sk_prot->hash(sk);
854
853
}
854
+
855
+
void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
855
856
856
857
/* About 10 seconds */
857
858
#define SOCK_DESTROY_TIME (10*HZ)
+1
kernel/fork.c
+11
kernel/kthread.c
···
265
265
return 0;
266
266
}
267
267
268
+
void __init_kthread_worker(struct kthread_worker *worker,
269
+
const char *name,
270
+
struct lock_class_key *key)
271
+
{
272
+
spin_lock_init(&worker->lock);
273
+
lockdep_set_class_and_name(&worker->lock, key, name);
274
+
INIT_LIST_HEAD(&worker->work_list);
275
+
worker->task = NULL;
276
+
}
277
+
EXPORT_SYMBOL_GPL(__init_kthread_worker);
278
+
268
279
/**
269
280
* kthread_worker_fn - kthread function to process kthread_worker
270
281
* @worker_ptr: pointer to initialized kthread_worker
+30
-7
kernel/perf_event.c
···
3824
3824
rcu_read_lock();
3825
3825
list_for_each_entry_rcu(pmu, &pmus, entry) {
3826
3826
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
3827
+
if (cpuctx->active_pmu != pmu)
3828
+
goto next;
3827
3829
perf_event_task_ctx(&cpuctx->ctx, task_event);
3828
3830
3829
3831
ctx = task_event->task_ctx;
···
3961
3959
rcu_read_lock();
3962
3960
list_for_each_entry_rcu(pmu, &pmus, entry) {
3963
3961
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
3962
+
if (cpuctx->active_pmu != pmu)
3963
+
goto next;
3964
3964
perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3965
3965
3966
3966
ctxn = pmu->task_ctx_nr;
···
4148
4144
rcu_read_lock();
4149
4145
list_for_each_entry_rcu(pmu, &pmus, entry) {
4150
4146
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4147
+
if (cpuctx->active_pmu != pmu)
4148
+
goto next;
4151
4149
perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4152
4150
vma->vm_flags & VM_EXEC);
4153
4151
···
4719
4713
break;
4720
4714
}
4721
4715
4722
-
if (event_id > PERF_COUNT_SW_MAX)
4716
+
if (event_id >= PERF_COUNT_SW_MAX)
4723
4717
return -ENOENT;
4724
4718
4725
4719
if (!event->parent) {
···
5151
5145
return NULL;
5152
5146
}
5153
5147
5154
-
static void free_pmu_context(void * __percpu cpu_context)
5148
+
static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
5155
5149
{
5156
-
struct pmu *pmu;
5150
+
int cpu;
5151
+
5152
+
for_each_possible_cpu(cpu) {
5153
+
struct perf_cpu_context *cpuctx;
5154
+
5155
+
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5156
+
5157
+
if (cpuctx->active_pmu == old_pmu)
5158
+
cpuctx->active_pmu = pmu;
5159
+
}
5160
+
}
5161
+
5162
+
static void free_pmu_context(struct pmu *pmu)
5163
+
{
5164
+
struct pmu *i;
5157
5165
5158
5166
mutex_lock(&pmus_lock);
5159
5167
/*
5160
5168
* Like a real lame refcount.
5161
5169
*/
5162
-
list_for_each_entry(pmu, &pmus, entry) {
5163
-
if (pmu->pmu_cpu_context == cpu_context)
5170
+
list_for_each_entry(i, &pmus, entry) {
5171
+
if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
5172
+
update_pmu_context(i, pmu);
5164
5173
goto out;
5174
+
}
5165
5175
}
5166
5176
5167
-
free_percpu(cpu_context);
5177
+
free_percpu(pmu->pmu_cpu_context);
5168
5178
out:
5169
5179
mutex_unlock(&pmus_lock);
5170
5180
}
···
5212
5190
cpuctx->ctx.pmu = pmu;
5213
5191
cpuctx->jiffies_interval = 1;
5214
5192
INIT_LIST_HEAD(&cpuctx->rotation_list);
5193
+
cpuctx->active_pmu = pmu;
5215
5194
}
5216
5195
5217
5196
got_cpu_context:
···
5264
5241
synchronize_rcu();
5265
5242
5266
5243
free_percpu(pmu->pmu_disable_count);
5267
-
free_pmu_context(pmu->pmu_cpu_context);
5244
+
free_pmu_context(pmu);
5268
5245
}
5269
5246
5270
5247
struct pmu *perf_init_event(struct perf_event *event)
+10
-94
kernel/resource.c
···
40
40
41
41
static DEFINE_RWLOCK(resource_lock);
42
42
43
-
/*
44
-
* By default, we allocate free space bottom-up. The architecture can request
45
-
* top-down by clearing this flag. The user can override the architecture's
46
-
* choice with the "resource_alloc_from_bottom" kernel boot option, but that
47
-
* should only be a debugging tool.
48
-
*/
49
-
int resource_alloc_from_bottom = 1;
50
-
51
-
static __init int setup_alloc_from_bottom(char *s)
52
-
{
53
-
printk(KERN_INFO
54
-
"resource: allocating from bottom-up; please report a bug\n");
55
-
resource_alloc_from_bottom = 1;
56
-
return 0;
57
-
}
58
-
early_param("resource_alloc_from_bottom", setup_alloc_from_bottom);
59
-
60
43
static void *r_next(struct seq_file *m, void *v, loff_t *pos)
61
44
{
62
45
struct resource *p = v;
···
357
374
return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
358
375
}
359
376
377
+
void __weak arch_remove_reservations(struct resource *avail)
378
+
{
379
+
}
380
+
360
381
static resource_size_t simple_align_resource(void *data,
361
382
const struct resource *avail,
362
383
resource_size_t size,
···
384
397
}
385
398
386
399
/*
387
-
* Find the resource before "child" in the sibling list of "root" children.
388
-
*/
389
-
static struct resource *find_sibling_prev(struct resource *root, struct resource *child)
390
-
{
391
-
struct resource *this;
392
-
393
-
for (this = root->child; this; this = this->sibling)
394
-
if (this->sibling == child)
395
-
return this;
396
-
397
-
return NULL;
398
-
}
399
-
400
-
/*
401
400
* Find empty slot in the resource tree given range and alignment.
402
-
* This version allocates from the end of the root resource first.
403
-
*/
404
-
static int find_resource_from_top(struct resource *root, struct resource *new,
405
-
resource_size_t size, resource_size_t min,
406
-
resource_size_t max, resource_size_t align,
407
-
resource_size_t (*alignf)(void *,
408
-
const struct resource *,
409
-
resource_size_t,
410
-
resource_size_t),
411
-
void *alignf_data)
412
-
{
413
-
struct resource *this;
414
-
struct resource tmp, avail, alloc;
415
-
416
-
tmp.start = root->end;
417
-
tmp.end = root->end;
418
-
419
-
this = find_sibling_prev(root, NULL);
420
-
for (;;) {
421
-
if (this) {
422
-
if (this->end < root->end)
423
-
tmp.start = this->end + 1;
424
-
} else
425
-
tmp.start = root->start;
426
-
427
-
resource_clip(&tmp, min, max);
428
-
429
-
/* Check for overflow after ALIGN() */
430
-
avail = *new;
431
-
avail.start = ALIGN(tmp.start, align);
432
-
avail.end = tmp.end;
433
-
if (avail.start >= tmp.start) {
434
-
alloc.start = alignf(alignf_data, &avail, size, align);
435
-
alloc.end = alloc.start + size - 1;
436
-
if (resource_contains(&avail, &alloc)) {
437
-
new->start = alloc.start;
438
-
new->end = alloc.end;
439
-
return 0;
440
-
}
441
-
}
442
-
443
-
if (!this || this->start == root->start)
444
-
break;
445
-
446
-
tmp.end = this->start - 1;
447
-
this = find_sibling_prev(root, this);
448
-
}
449
-
return -EBUSY;
450
-
}
451
-
452
-
/*
453
-
* Find empty slot in the resource tree given range and alignment.
454
-
* This version allocates from the beginning of the root resource first.
455
401
*/
456
402
static int find_resource(struct resource *root, struct resource *new,
457
403
resource_size_t size, resource_size_t min,
···
398
478
struct resource *this = root->child;
399
479
struct resource tmp = *new, avail, alloc;
400
480
481
+
tmp.flags = new->flags;
401
482
tmp.start = root->start;
402
483
/*
403
-
* Skip past an allocated resource that starts at 0, since the
404
-
* assignment of this->start - 1 to tmp->end below would cause an
405
-
* underflow.
484
+
* Skip past an allocated resource that starts at 0, since the assignment
485
+
* of this->start - 1 to tmp->end below would cause an underflow.
406
486
*/
407
487
if (this && this->start == 0) {
408
488
tmp.start = this->end + 1;
409
489
this = this->sibling;
410
490
}
411
-
for (;;) {
491
+
for(;;) {
412
492
if (this)
413
493
tmp.end = this->start - 1;
414
494
else
415
495
tmp.end = root->end;
416
496
417
497
resource_clip(&tmp, min, max);
498
+
arch_remove_reservations(&tmp);
418
499
419
500
/* Check for overflow after ALIGN() */
420
501
avail = *new;
···
430
509
return 0;
431
510
}
432
511
}
433
-
434
512
if (!this)
435
513
break;
436
-
437
514
tmp.start = this->end + 1;
438
515
this = this->sibling;
439
516
}
···
464
545
alignf = simple_align_resource;
465
546
466
547
write_lock(&resource_lock);
467
-
if (resource_alloc_from_bottom)
468
-
err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
469
-
else
470
-
err = find_resource_from_top(root, new, size, min, max, align, alignf, alignf_data);
548
+
err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
471
549
if (err >= 0 && __request_resource(root, new))
472
550
err = -EBUSY;
473
551
write_unlock(&resource_lock);
+238
-53
kernel/sched.c
···
636
636
637
637
#endif /* CONFIG_CGROUP_SCHED */
638
638
639
-
static u64 irq_time_cpu(int cpu);
640
-
static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
639
+
static void update_rq_clock_task(struct rq *rq, s64 delta);
641
640
642
-
inline void update_rq_clock(struct rq *rq)
641
+
static void update_rq_clock(struct rq *rq)
643
642
{
644
-
if (!rq->skip_clock_update) {
645
-
int cpu = cpu_of(rq);
646
-
u64 irq_time;
643
+
s64 delta;
647
644
648
-
rq->clock = sched_clock_cpu(cpu);
649
-
irq_time = irq_time_cpu(cpu);
650
-
if (rq->clock - irq_time > rq->clock_task)
651
-
rq->clock_task = rq->clock - irq_time;
645
+
if (rq->skip_clock_update)
646
+
return;
652
647
653
-
sched_irq_time_avg_update(rq, irq_time);
654
-
}
648
+
delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
649
+
rq->clock += delta;
650
+
update_rq_clock_task(rq, delta);
655
651
}
656
652
657
653
/*
···
1920
1924
* They are read and saved off onto struct rq in update_rq_clock().
1921
1925
* This may result in other CPU reading this CPU's irq time and can
1922
1926
* race with irq/account_system_vtime on this CPU. We would either get old
1923
-
* or new value (or semi updated value on 32 bit) with a side effect of
1924
-
* accounting a slice of irq time to wrong task when irq is in progress
1925
-
* while we read rq->clock. That is a worthy compromise in place of having
1926
-
* locks on each irq in account_system_time.
1927
+
* or new value with a side effect of accounting a slice of irq time to wrong
1928
+
* task when irq is in progress while we read rq->clock. That is a worthy
1929
+
* compromise in place of having locks on each irq in account_system_time.
1927
1930
*/
1928
1931
static DEFINE_PER_CPU(u64, cpu_hardirq_time);
1929
1932
static DEFINE_PER_CPU(u64, cpu_softirq_time);
···
1940
1945
sched_clock_irqtime = 0;
1941
1946
}
1942
1947
1943
-
static u64 irq_time_cpu(int cpu)
1944
-
{
1945
-
if (!sched_clock_irqtime)
1946
-
return 0;
1948
+
#ifndef CONFIG_64BIT
1949
+
static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
1947
1950
1948
-
return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1951
+
static inline void irq_time_write_begin(void)
1952
+
{
1953
+
__this_cpu_inc(irq_time_seq.sequence);
1954
+
smp_wmb();
1949
1955
}
1950
1956
1957
+
static inline void irq_time_write_end(void)
1958
+
{
1959
+
smp_wmb();
1960
+
__this_cpu_inc(irq_time_seq.sequence);
1961
+
}
1962
+
1963
+
static inline u64 irq_time_read(int cpu)
1964
+
{
1965
+
u64 irq_time;
1966
+
unsigned seq;
1967
+
1968
+
do {
1969
+
seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
1970
+
irq_time = per_cpu(cpu_softirq_time, cpu) +
1971
+
per_cpu(cpu_hardirq_time, cpu);
1972
+
} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
1973
+
1974
+
return irq_time;
1975
+
}
1976
+
#else /* CONFIG_64BIT */
1977
+
static inline void irq_time_write_begin(void)
1978
+
{
1979
+
}
1980
+
1981
+
static inline void irq_time_write_end(void)
1982
+
{
1983
+
}
1984
+
1985
+
static inline u64 irq_time_read(int cpu)
1986
+
{
1987
+
return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1988
+
}
1989
+
#endif /* CONFIG_64BIT */
1990
+
1991
+
/*
1992
+
* Called before incrementing preempt_count on {soft,}irq_enter
1993
+
* and before decrementing preempt_count on {soft,}irq_exit.
1994
+
*/
1951
1995
void account_system_vtime(struct task_struct *curr)
1952
1996
{
1953
1997
unsigned long flags;
1998
+
s64 delta;
1954
1999
int cpu;
1955
-
u64 now, delta;
1956
2000
1957
2001
if (!sched_clock_irqtime)
1958
2002
return;
···
1999
1965
local_irq_save(flags);
2000
1966
2001
1967
cpu = smp_processor_id();
2002
-
now = sched_clock_cpu(cpu);
2003
-
delta = now - per_cpu(irq_start_time, cpu);
2004
-
per_cpu(irq_start_time, cpu) = now;
1968
+
delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
1969
+
__this_cpu_add(irq_start_time, delta);
1970
+
1971
+
irq_time_write_begin();
2005
1972
/*
2006
1973
* We do not account for softirq time from ksoftirqd here.
2007
1974
* We want to continue accounting softirq time to ksoftirqd thread
···
2010
1975
* that do not consume any time, but still wants to run.
2011
1976
*/
2012
1977
if (hardirq_count())
2013
-
per_cpu(cpu_hardirq_time, cpu) += delta;
1978
+
__this_cpu_add(cpu_hardirq_time, delta);
2014
1979
else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
2015
-
per_cpu(cpu_softirq_time, cpu) += delta;
1980
+
__this_cpu_add(cpu_softirq_time, delta);
2016
1981
1982
+
irq_time_write_end();
2017
1983
local_irq_restore(flags);
2018
1984
}
2019
1985
EXPORT_SYMBOL_GPL(account_system_vtime);
2020
1986
2021
-
static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
1987
+
static void update_rq_clock_task(struct rq *rq, s64 delta)
2022
1988
{
2023
-
if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
2024
-
u64 delta_irq = curr_irq_time - rq->prev_irq_time;
2025
-
rq->prev_irq_time = curr_irq_time;
2026
-
sched_rt_avg_update(rq, delta_irq);
2027
-
}
1989
+
s64 irq_delta;
1990
+
1991
+
irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
1992
+
1993
+
/*
1994
+
* Since irq_time is only updated on {soft,}irq_exit, we might run into
1995
+
* this case when a previous update_rq_clock() happened inside a
1996
+
* {soft,}irq region.
1997
+
*
1998
+
* When this happens, we stop ->clock_task and only update the
1999
+
* prev_irq_time stamp to account for the part that fit, so that a next
2000
+
* update will consume the rest. This ensures ->clock_task is
2001
+
* monotonic.
2002
+
*
2003
+
* It does, however, cause some slight misattribution of {soft,}irq
2004
+
* time, a more accurate solution would be to update the irq_time using
2005
+
* the current rq->clock timestamp, except that would require using
2006
+
* atomic ops.
2007
+
*/
2008
+
if (irq_delta > delta)
2009
+
irq_delta = delta;
2010
+
2011
+
rq->prev_irq_time += irq_delta;
2012
+
delta -= irq_delta;
2013
+
rq->clock_task += delta;
2014
+
2015
+
if (irq_delta && sched_feat(NONIRQ_POWER))
2016
+
sched_rt_avg_update(rq, irq_delta);
2028
2017
}
2029
2018
2030
-
#else
2019
+
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
2031
2020
2032
-
static u64 irq_time_cpu(int cpu)
2021
+
static void update_rq_clock_task(struct rq *rq, s64 delta)
2033
2022
{
2034
-
return 0;
2023
+
rq->clock_task += delta;
2035
2024
}
2036
2025
2037
-
static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }
2038
-
2039
-
#endif
2026
+
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
2040
2027
2041
2028
#include "sched_idletask.c"
2042
2029
#include "sched_fair.c"
···
2186
2129
* A queue event has occurred, and we're going to schedule. In
2187
2130
* this case, we can save a useless back to back clock update.
2188
2131
*/
2189
-
if (test_tsk_need_resched(rq->curr))
2132
+
if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
2190
2133
rq->skip_clock_update = 1;
2191
2134
}
2192
2135
···
3176
3119
return delta;
3177
3120
}
3178
3121
3122
+
static unsigned long
3123
+
calc_load(unsigned long load, unsigned long exp, unsigned long active)
3124
+
{
3125
+
load *= exp;
3126
+
load += active * (FIXED_1 - exp);
3127
+
load += 1UL << (FSHIFT - 1);
3128
+
return load >> FSHIFT;
3129
+
}
3130
+
3179
3131
#ifdef CONFIG_NO_HZ
3180
3132
/*
3181
3133
* For NO_HZ we delay the active fold to the next LOAD_FREQ update.
···
3214
3148
3215
3149
return delta;
3216
3150
}
3151
+
3152
+
/**
3153
+
* fixed_power_int - compute: x^n, in O(log n) time
3154
+
*
3155
+
* @x: base of the power
3156
+
* @frac_bits: fractional bits of @x
3157
+
* @n: power to raise @x to.
3158
+
*
3159
+
* By exploiting the relation between the definition of the natural power
3160
+
* function: x^n := x*x*...*x (x multiplied by itself for n times), and
3161
+
* the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
3162
+
* (where: n_i \elem {0, 1}, the binary vector representing n),
3163
+
* we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
3164
+
* of course trivially computable in O(log_2 n), the length of our binary
3165
+
* vector.
3166
+
*/
3167
+
static unsigned long
3168
+
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
3169
+
{
3170
+
unsigned long result = 1UL << frac_bits;
3171
+
3172
+
if (n) for (;;) {
3173
+
if (n & 1) {
3174
+
result *= x;
3175
+
result += 1UL << (frac_bits - 1);
3176
+
result >>= frac_bits;
3177
+
}
3178
+
n >>= 1;
3179
+
if (!n)
3180
+
break;
3181
+
x *= x;
3182
+
x += 1UL << (frac_bits - 1);
3183
+
x >>= frac_bits;
3184
+
}
3185
+
3186
+
return result;
3187
+
}
3188
+
3189
+
/*
3190
+
* a1 = a0 * e + a * (1 - e)
3191
+
*
3192
+
* a2 = a1 * e + a * (1 - e)
3193
+
* = (a0 * e + a * (1 - e)) * e + a * (1 - e)
3194
+
* = a0 * e^2 + a * (1 - e) * (1 + e)
3195
+
*
3196
+
* a3 = a2 * e + a * (1 - e)
3197
+
* = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
3198
+
* = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
3199
+
*
3200
+
* ...
3201
+
*
3202
+
* an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
3203
+
* = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
3204
+
* = a0 * e^n + a * (1 - e^n)
3205
+
*
3206
+
* [1] application of the geometric series:
3207
+
*
3208
+
*          n         1 - x^(n+1)
3209
+
* S_n := \Sum x^i = -------------
3210
+
*         i=0            1 - x
3211
+
*/
3212
+
static unsigned long
3213
+
calc_load_n(unsigned long load, unsigned long exp,
3214
+
unsigned long active, unsigned int n)
3215
+
{
3216
+
3217
+
return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
3218
+
}
3219
+
3220
+
/*
3221
+
* NO_HZ can leave us missing all per-cpu ticks calling
3222
+
* calc_load_account_active(), but since an idle CPU folds its delta into
3223
+
* calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
3224
+
* in the pending idle delta if our idle period crossed a load cycle boundary.
3225
+
*
3226
+
* Once we've updated the global active value, we need to apply the exponential
3227
+
* weights adjusted to the number of cycles missed.
3228
+
*/
3229
+
static void calc_global_nohz(unsigned long ticks)
3230
+
{
3231
+
long delta, active, n;
3232
+
3233
+
if (time_before(jiffies, calc_load_update))
3234
+
return;
3235
+
3236
+
/*
3237
+
* If we crossed a calc_load_update boundary, make sure to fold
3238
+
* any pending idle changes, the respective CPUs might have
3239
+
* missed the tick driven calc_load_account_active() update
3240
+
* due to NO_HZ.
3241
+
*/
3242
+
delta = calc_load_fold_idle();
3243
+
if (delta)
3244
+
atomic_long_add(delta, &calc_load_tasks);
3245
+
3246
+
/*
3247
+
* If we were idle for multiple load cycles, apply them.
3248
+
*/
3249
+
if (ticks >= LOAD_FREQ) {
3250
+
n = ticks / LOAD_FREQ;
3251
+
3252
+
active = atomic_long_read(&calc_load_tasks);
3253
+
active = active > 0 ? active * FIXED_1 : 0;
3254
+
3255
+
avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
3256
+
avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
3257
+
avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
3258
+
3259
+
calc_load_update += n * LOAD_FREQ;
3260
+
}
3261
+
3262
+
/*
3263
+
* It's possible the remainder of the above division also crosses
3264
+
* a LOAD_FREQ period, the regular check in calc_global_load()
3265
+
* which comes after this will take care of that.
3266
+
*
3267
+
* Consider us being 11 ticks before a cycle completion, and us
3268
+
* sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
3269
+
* age us 4 cycles, and the test in calc_global_load() will
3270
+
* pick up the final one.
3271
+
*/
3272
+
}
3217
3273
#else
3218
3274
static void calc_load_account_idle(struct rq *this_rq)
3219
3275
{
···
3344
3156
static inline long calc_load_fold_idle(void)
3345
3157
{
3346
3158
return 0;
3159
+
}
3160
+
3161
+
static void calc_global_nohz(unsigned long ticks)
3162
+
{
3347
3163
}
3348
3164
#endif
3349
3165
···
3366
3174
loads[2] = (avenrun[2] + offset) << shift;
3367
3175
}
3368
3176
3369
-
static unsigned long
3370
-
calc_load(unsigned long load, unsigned long exp, unsigned long active)
3371
-
{
3372
-
load *= exp;
3373
-
load += active * (FIXED_1 - exp);
3374
-
return load >> FSHIFT;
3375
-
}
3376
-
3377
3177
/*
3378
3178
* calc_load - update the avenrun load estimates 10 ticks after the
3379
3179
* CPUs have updated calc_load_tasks.
3380
3180
*/
3381
-
void calc_global_load(void)
3181
+
void calc_global_load(unsigned long ticks)
3382
3182
{
3383
-
unsigned long upd = calc_load_update + 10;
3384
3183
long active;
3385
3184
3386
-
if (time_before(jiffies, upd))
3185
+
calc_global_nohz(ticks);
3186
+
3187
+
if (time_before(jiffies, calc_load_update + 10))
3387
3188
return;
3388
3189
3389
3190
active = atomic_long_read(&calc_load_tasks);
···
4030
3845
{
4031
3846
if (prev->se.on_rq)
4032
3847
update_rq_clock(rq);
4033
-
rq->skip_clock_update = 0;
4034
3848
prev->sched_class->put_prev_task(rq, prev);
4035
3849
}
4036
3850
···
4087
3903
hrtick_clear(rq);
4088
3904
4089
3905
raw_spin_lock_irq(&rq->lock);
4090
-
clear_tsk_need_resched(prev);
4091
3906
4092
3907
switch_count = &prev->nivcsw;
4093
3908
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
···
4118
3935
4119
3936
put_prev_task(rq, prev);
4120
3937
next = pick_next_task(rq);
3938
+
clear_tsk_need_resched(prev);
3939
+
rq->skip_clock_update = 0;
4121
3940
4122
3941
if (likely(prev != next)) {
4123
3942
sched_info_switch(prev, next);
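The closed form an = a0*e^n + a*(1 - e^n) is what makes catching up after a long NO_HZ sleep cheap: one fixed-point exponentiation replaces n passes of calc_load(). A quick userspace check of that equivalence (calc_load() and fixed_power_int() are copied from the hunks above; FSHIFT/FIXED_1/EXP_1 mirror the kernel's constants, and the two results agree up to fixed-point rounding):

#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1UL << FSHIFT)
#define EXP_1	1884	/* fixed-point 1/exp(5sec/1min) */

static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	load += 1UL << (FSHIFT - 1);
	return load >> FSHIFT;
}

static unsigned long fixed_power_int(unsigned long x, unsigned int frac_bits,
				     unsigned int n)
{
	unsigned long result = 1UL << frac_bits;

	if (n) for (;;) {
		if (n & 1) {
			result *= x;
			result += 1UL << (frac_bits - 1);
			result >>= frac_bits;
		}
		n >>= 1;
		if (!n)
			break;
		x *= x;
		x += 1UL << (frac_bits - 1);
		x >>= frac_bits;
	}
	return result;
}

int main(void)
{
	unsigned long iter = FIXED_1 / 2, active = 3 * FIXED_1;
	unsigned int i, n = 7;

	for (i = 0; i < n; i++)
		iter = calc_load(iter, EXP_1, active);

	/* calc_load_n() is exactly this composition */
	printf("iterated: %lu, closed form: %lu\n", iter,
	       calc_load(FIXED_1 / 2,
			 fixed_power_int(EXP_1, FSHIFT, n), active));
	return 0;
}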
+44
-13
kernel/taskstats.c
···
349
349
return ret;
350
350
}
351
351
352
+
#ifdef CONFIG_IA64
353
+
#define TASKSTATS_NEEDS_PADDING 1
354
+
#endif
355
+
352
356
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
353
357
{
354
358
struct nlattr *na, *ret;
355
359
int aggr;
356
360
357
-
/* If we don't pad, we end up with alignment on a 4 byte boundary.
358
-
* This causes lots of runtime warnings on systems requiring 8 byte
359
-
* alignment */
360
-
u32 pids[2] = { pid, 0 };
361
-
int pid_size = ALIGN(sizeof(pid), sizeof(long));
362
-
363
361
aggr = (type == TASKSTATS_TYPE_PID)
364
362
? TASKSTATS_TYPE_AGGR_PID
365
363
: TASKSTATS_TYPE_AGGR_TGID;
366
364
365
+
/*
366
+
* The taskstats structure is internally aligned on 8 byte
367
+
* boundaries but the layout of the aggregate reply, with
369
+
* two NLA headers and the pid (each 4 bytes), actually
370
+
* forces the entire structure to be unaligned. This causes
370
+
* the kernel to issue unaligned access warnings on some
371
+
* architectures like ia64. Unfortunately, some software out there
372
+
* doesn't properly unroll the NLA packet and assumes that the start
373
+
* of the taskstats structure will always be 20 bytes from the start
374
+
* of the netlink payload. Aligning the start of the taskstats
375
+
* structure breaks this software, which we don't want. So, for now
376
+
* the alignment only happens on architectures that require it
377
+
* and those users will have to update to fixed versions of those
378
+
* packages. Space is reserved in the packet only when needed.
379
+
* This ifdef should be removed in several years e.g. 2012 once
380
+
* we can be confident that fixed versions are installed on most
381
+
* systems. We add the padding before the aggregate since the
382
+
* aggregate is already a defined type.
383
+
*/
384
+
#ifdef TASKSTATS_NEEDS_PADDING
385
+
if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
386
+
goto err;
387
+
#endif
367
388
na = nla_nest_start(skb, aggr);
368
389
if (!na)
369
390
goto err;
370
-
if (nla_put(skb, type, pid_size, pids) < 0)
391
+
392
+
if (nla_put(skb, type, sizeof(pid), &pid) < 0)
371
393
goto err;
372
394
ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
373
395
if (!ret)
···
478
456
return rc;
479
457
}
480
458
459
+
static size_t taskstats_packet_size(void)
460
+
{
461
+
size_t size;
462
+
463
+
size = nla_total_size(sizeof(u32)) +
464
+
nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
465
+
#ifdef TASKSTATS_NEEDS_PADDING
466
+
size += nla_total_size(0); /* Padding for alignment */
467
+
#endif
468
+
return size;
469
+
}
470
+
481
471
static int cmd_attr_pid(struct genl_info *info)
482
472
{
483
473
struct taskstats *stats;
···
498
464
u32 pid;
499
465
int rc;
500
466
501
-
size = nla_total_size(sizeof(u32)) +
502
-
nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
467
+
size = taskstats_packet_size();
503
468
504
469
rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
505
470
if (rc < 0)
···
527
494
u32 tgid;
528
495
int rc;
529
496
530
-
size = nla_total_size(sizeof(u32)) +
531
-
nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
497
+
size = taskstats_packet_size();
532
498
533
499
rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
534
500
if (rc < 0)
···
602
570
/*
603
571
* Size includes space for nested attributes
604
572
*/
605
-
size = nla_total_size(sizeof(u32)) +
606
-
nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
573
+
size = taskstats_packet_size();
607
574
608
575
is_thread_group = !!taskstats_tgid_alloc(tsk);
609
576
if (is_thread_group) {
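The "20 bytes from the start of the netlink payload" in the comment is simple header arithmetic, and the empty TASKSTATS_TYPE_NULL attribute adds exactly the 4 bytes needed to reach an 8-aligned offset of 24. A toy verification (the genlmsghdr and every netlink attribute header are 4 bytes each):

#include <stdio.h>

int main(void)
{
	int pad;

	for (pad = 0; pad <= 4; pad += 4) {	/* without, then with padding */
		int off = 4;	/* genlmsghdr */

		off += pad;	/* TASKSTATS_TYPE_NULL, when present */
		off += 4;	/* nested TASKSTATS_TYPE_AGGR_PID header */
		off += 4 + 4;	/* TASKSTATS_TYPE_PID header + u32 pid */
		off += 4;	/* TASKSTATS_TYPE_STATS header */
		printf("stats payload at offset %2d -> %s\n", off,
		       off % 8 ? "unaligned" : "8-byte aligned");
	}
	return 0;
}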
+7
-1
kernel/timer.c
···
1252
1252
struct tvec_base *base = __get_cpu_var(tvec_bases);
1253
1253
unsigned long expires;
1254
1254
1255
+
/*
1256
+
* Pretend that there is no timer pending if the cpu is offline.
1257
+
* Possible pending timers will be migrated later to an active cpu.
1258
+
*/
1259
+
if (cpu_is_offline(smp_processor_id()))
1260
+
return now + NEXT_TIMER_MAX_DELTA;
1255
1261
spin_lock(&base->lock);
1256
1262
if (time_before_eq(base->next_timer, base->timer_jiffies))
1257
1263
base->next_timer = __next_timer_interrupt(base);
···
1325
1319
{
1326
1320
jiffies_64 += ticks;
1327
1321
update_wall_time();
1328
-
calc_global_load();
1322
+
calc_global_load(ticks);
1329
1323
}
1330
1324
1331
1325
#ifdef __ARCH_WANT_SYS_ALARM
+8
-1
kernel/trace/ring_buffer.c
···
3853
3853
3854
3854
/* Need to copy one event at a time */
3855
3855
do {
3856
+
/* We need the size of one event, because
3857
+
* rb_advance_reader only advances by one event,
3858
+
* whereas rb_event_ts_length may include the size of
3859
+
* one or two events.
3860
+
* We have already ensured there's enough space if this
3861
+
* is a time extend. */
3862
+
size = rb_event_length(event);
3856
3863
memcpy(bpage->data + pos, rpage->data + rpos, size);
3857
3864
3858
3865
len -= size;
···
3874
3867
event = rb_reader_event(cpu_buffer);
3875
3868
/* Always keep the time extend and data together */
3876
3869
size = rb_event_ts_length(event);
3877
-
} while (len > size);
3870
+
} while (len >= size);
3878
3871
3879
3872
/* update bpage */
3880
3873
local_set(&bpage->commit, pos);
+9
-1
kernel/trace/trace.c
···
2338
2338
return count;
2339
2339
}
2340
2340
2341
+
static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
2342
+
{
2343
+
if (file->f_mode & FMODE_READ)
2344
+
return seq_lseek(file, offset, origin);
2345
+
else
2346
+
return 0;
2347
+
}
2348
+
2341
2349
static const struct file_operations tracing_fops = {
2342
2350
.open = tracing_open,
2343
2351
.read = seq_read,
2344
2352
.write = tracing_write_stub,
2345
-
.llseek = seq_lseek,
2353
+
.llseek = tracing_seek,
2346
2354
.release = tracing_release,
2347
2355
};
2348
2356
-1
mm/compaction.c
+2
mm/migrate.c
+27
-1
mm/nommu.c
···
10
10
* Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
11
11
* Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
12
12
* Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
13
-
* Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org>
13
+
* Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
14
14
*/
15
15
16
16
#include <linux/module.h>
···
328
328
{
329
329
return vmalloc(size);
330
330
}
331
+
EXPORT_SYMBOL(vmalloc_node);
331
332
332
333
/**
333
334
* vzalloc_node - allocate memory on a specific node with zero fill
···
440
439
void __attribute__((weak)) vmalloc_sync_all(void)
441
440
{
442
441
}
442
+
443
+
/**
444
+
* alloc_vm_area - allocate a range of kernel address space
445
+
* @size: size of the area
446
+
*
447
+
* Returns: NULL on failure, vm_struct on success
448
+
*
449
+
* This function reserves a range of kernel address space, and
450
+
* allocates pagetables to map that range. No actual mappings
451
+
* are created. If the kernel address space is not shared
452
+
* between processes, it syncs the pagetable across all
453
+
* processes.
454
+
*/
455
+
struct vm_struct *alloc_vm_area(size_t size)
456
+
{
457
+
BUG();
458
+
return NULL;
459
+
}
460
+
EXPORT_SYMBOL_GPL(alloc_vm_area);
461
+
462
+
void free_vm_area(struct vm_struct *area)
463
+
{
464
+
BUG();
465
+
}
466
+
EXPORT_SYMBOL_GPL(free_vm_area);
443
467
444
468
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
445
469
struct page *page)
+1
-1
mm/page-writeback.c
+1
-1
mm/percpu.c
···
1268
1268
1269
1269
/* we're done parsing the input, undefine BUG macro and dump config */
1270
1270
#undef PCPU_SETUP_BUG_ON
1271
-
pcpu_dump_alloc_info(KERN_INFO, ai);
1271
+
pcpu_dump_alloc_info(KERN_DEBUG, ai);
1272
1272
1273
1273
pcpu_nr_groups = ai->nr_groups;
1274
1274
pcpu_group_offsets = group_offsets;
+1
net/bluetooth/rfcomm/core.c
+1
-1
net/bridge/br_multicast.c
···
437
437
ip6h = ipv6_hdr(skb);
438
438
439
439
*(__force __be32 *)ip6h = htonl(0x60000000);
440
-
ip6h->payload_len = 8 + sizeof(*mldq);
440
+
ip6h->payload_len = htons(8 + sizeof(*mldq));
441
441
ip6h->nexthdr = IPPROTO_HOPOPTS;
442
442
ip6h->hop_limit = 1;
443
443
ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
+3
-5
net/ceph/messenger.c
···
97
97
int ceph_msgr_init(void)
98
98
{
99
99
ceph_msgr_wq = create_workqueue("ceph-msgr");
100
-
if (IS_ERR(ceph_msgr_wq)) {
101
-
int ret = PTR_ERR(ceph_msgr_wq);
102
-
pr_err("msgr_init failed to create workqueue: %d\n", ret);
103
-
ceph_msgr_wq = NULL;
104
-
return ret;
100
+
if (!ceph_msgr_wq) {
101
+
pr_err("msgr_init failed to create workqueue\n");
102
+
return -ENOMEM;
105
103
}
106
104
return 0;
107
105
}
+9
-6
net/ceph/pagevec.c
···
13
13
* build a vector of user pages
14
14
*/
15
15
struct page **ceph_get_direct_page_vector(const char __user *data,
16
-
int num_pages)
16
+
int num_pages, bool write_page)
17
17
{
18
18
struct page **pages;
19
19
int rc;
···
24
24
25
25
down_read(&current->mm->mmap_sem);
26
26
rc = get_user_pages(current, current->mm, (unsigned long)data,
27
-
num_pages, 0, 0, pages, NULL);
27
+
num_pages, write_page, 0, pages, NULL);
28
28
up_read(&current->mm->mmap_sem);
29
-
if (rc < 0)
29
+
if (rc < num_pages)
30
30
goto fail;
31
31
return pages;
32
32
33
33
fail:
34
-
kfree(pages);
34
+
ceph_put_page_vector(pages, rc > 0 ? rc : 0, false);
35
35
return ERR_PTR(rc);
36
36
}
37
37
EXPORT_SYMBOL(ceph_get_direct_page_vector);
38
38
39
-
void ceph_put_page_vector(struct page **pages, int num_pages)
39
+
void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
40
40
{
41
41
int i;
42
42
43
-
for (i = 0; i < num_pages; i++)
43
+
for (i = 0; i < num_pages; i++) {
44
+
if (dirty)
45
+
set_page_dirty_lock(pages[i]);
44
46
put_page(pages[i]);
47
+
}
45
48
kfree(pages);
46
49
}
47
50
EXPORT_SYMBOL(ceph_put_page_vector);
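The new bool parameters push two decisions to the caller: whether get_user_pages() should fault the pages in writable, and whether to mark them dirty before release so data written into them is not lost. A sketch of a read-side caller under those assumptions (copy_reply_into_user_pages() is hypothetical):

#include <linux/ceph/libceph.h>
#include <linux/err.h>

static int copy_reply_into_user_pages(const char __user *data, int num_pages)
{
	struct page **pages;

	/* true: the pages will be written to, so map them writable */
	pages = ceph_get_direct_page_vector(data, num_pages, true);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... copy the reply off the wire into the pages ... */

	/* true: dirty the pages so the new contents survive reclaim */
	ceph_put_page_vector(pages, num_pages, true);
	return 0;
}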
+1
-2
net/core/fib_rules.c
+35
-12
net/core/sock.c
···
1009
1009
#endif
1010
1010
}
1011
1011
1012
+
/*
1013
+
* caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
1014
+
* un-modified. Special care is taken when initializing object to zero.
1015
+
*/
1016
+
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1017
+
{
1018
+
if (offsetof(struct sock, sk_node.next) != 0)
1019
+
memset(sk, 0, offsetof(struct sock, sk_node.next));
1020
+
memset(&sk->sk_node.pprev, 0,
1021
+
size - offsetof(struct sock, sk_node.pprev));
1022
+
}
1023
+
1024
+
void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1025
+
{
1026
+
unsigned long nulls1, nulls2;
1027
+
1028
+
nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1029
+
nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1030
+
if (nulls1 > nulls2)
1031
+
swap(nulls1, nulls2);
1032
+
1033
+
if (nulls1 != 0)
1034
+
memset((char *)sk, 0, nulls1);
1035
+
memset((char *)sk + nulls1 + sizeof(void *), 0,
1036
+
nulls2 - nulls1 - sizeof(void *));
1037
+
memset((char *)sk + nulls2 + sizeof(void *), 0,
1038
+
size - nulls2 - sizeof(void *));
1039
+
}
1040
+
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1041
+
1012
1042
static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1013
1043
int family)
1014
1044
{
···
1051
1021
if (!sk)
1052
1022
return sk;
1053
1023
if (priority & __GFP_ZERO) {
1054
-
/*
1055
-
* caches using SLAB_DESTROY_BY_RCU should let
1056
-
* sk_node.next un-modified. Special care is taken
1057
-
* when initializing object to zero.
1058
-
*/
1059
-
if (offsetof(struct sock, sk_node.next) != 0)
1060
-
memset(sk, 0, offsetof(struct sock, sk_node.next));
1061
-
memset(&sk->sk_node.pprev, 0,
1062
-
prot->obj_size - offsetof(struct sock,
1063
-
sk_node.pprev));
1024
+
if (prot->clear_sk)
1025
+
prot->clear_sk(sk, prot->obj_size);
1026
+
else
1027
+
sk_prot_clear_nulls(sk, prot->obj_size);
1064
1028
}
1065
-
}
1066
-
else
1029
+
} else
1067
1030
sk = kmalloc(prot->obj_size, priority);
1068
1031
1069
1032
if (sk != NULL) {
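The hook exists because protocols such as UDP hash the socket on a second nulls node (skc_portaddr_node) which, like sk_node.next, must not be zeroed by a __GFP_ZERO allocation under SLAB_DESTROY_BY_RCU. The one-line udp/udplite hunks further down presumably just wire up the new callback; a sketch of that wiring (fragment, not the full udp_prot definition):

#include <linux/udp.h>
#include <net/sock.h>

struct proto udp_prot = {
	.name		= "UDP",
	.obj_size	= sizeof(struct udp_sock),
	.clear_sk	= sk_prot_clear_portaddr_nulls,
	/* ... remaining callbacks elided ... */
};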
+8
-2
net/ipv4/fib_frontend.c
···
163
163
.daddr = addr
164
164
}
165
165
},
166
-
.flags = FLOWI_FLAG_MATCH_ANY_IIF
167
166
};
168
167
struct fib_result res = { 0 };
169
168
struct net_device *dev = NULL;
169
+
struct fib_table *local_table;
170
+
171
+
#ifdef CONFIG_IP_MULTIPLE_TABLES
172
+
res.r = NULL;
173
+
#endif
170
174
171
175
rcu_read_lock();
172
-
if (fib_lookup(net, &fl, &res)) {
176
+
local_table = fib_get_table(net, RT_TABLE_LOCAL);
177
+
if (!local_table ||
178
+
fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
173
179
rcu_read_unlock();
174
180
return NULL;
175
181
}
+4
-3
net/ipv4/route.c
···
2585
2585
goto out;
2586
2586
2587
2587
/* RACE: Check return value of inet_select_addr instead. */
2588
-
if (rcu_dereference(dev_out->ip_ptr) == NULL)
2589
-
goto out; /* Wrong error code */
2590
-
2588
+
if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2589
+
err = -ENETUNREACH;
2590
+
goto out;
2591
+
}
2591
2592
if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
2592
2593
ipv4_is_lbcast(oldflp->fl4_dst)) {
2593
2594
if (!fl.fl4_src)
+2
-2
net/ipv4/tcp_ipv4.c
···
2030
2030
get_req:
2031
2031
req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2032
2032
}
2033
-
sk = sk_next(st->syn_wait_sk);
2033
+
sk = sk_nulls_next(st->syn_wait_sk);
2034
2034
st->state = TCP_SEQ_STATE_LISTENING;
2035
2035
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2036
2036
} else {
···
2039
2039
if (reqsk_queue_len(&icsk->icsk_accept_queue))
2040
2040
goto start_req;
2041
2041
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2042
-
sk = sk_next(sk);
2042
+
sk = sk_nulls_next(sk);
2043
2043
}
2044
2044
get_sk:
2045
2045
sk_nulls_for_each_from(sk, node) {
+1
net/ipv4/udp.c
+1
net/ipv4/udplite.c
+3
-1
net/ipv6/addrconf.c
···
2669
2669
2670
2670
ASSERT_RTNL();
2671
2671
2672
-
rt6_ifdown(net, dev);
2672
+
/* Flush routes if device is being removed or it is not loopback */
2673
+
if (how || !(dev->flags & IFF_LOOPBACK))
2674
+
rt6_ifdown(net, dev);
2673
2675
neigh_ifdown(&nd_tbl, dev);
2674
2676
2675
2677
idev = __in6_dev_get(dev);
+2
-10
net/ipv6/ip6_output.c
···
56
56
#include <net/checksum.h>
57
57
#include <linux/mroute6.h>
58
58
59
-
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
59
+
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
60
60
61
61
int __ip6_local_out(struct sk_buff *skb)
62
62
{
···
143
143
ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
144
144
kfree_skb(skb);
145
145
return -EINVAL;
146
-
}
147
-
148
-
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
149
-
{
150
-
struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
151
-
152
-
return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
153
-
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
154
146
}
155
147
156
148
static int ip6_finish_output(struct sk_buff *skb)
···
593
601
return offset;
594
602
}
595
603
596
-
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
604
+
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
597
605
{
598
606
struct sk_buff *frag;
599
607
struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
+6
-1
net/ipv6/route.c
···
1565
1565
{
1566
1566
struct rt6_info *rt, *nrt;
1567
1567
int allfrag = 0;
1568
-
1568
+
again:
1569
1569
rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
1570
1570
if (rt == NULL)
1571
1571
return;
1572
+
1573
+
if (rt6_check_expired(rt)) {
1574
+
ip6_del_rt(rt);
1575
+
goto again;
1576
+
}
1572
1577
1573
1578
if (pmtu >= dst_mtu(&rt->dst))
1574
1579
goto out;
+1
net/ipv6/udp.c
+1
net/ipv6/udplite.c
+15
-1
net/ipv6/xfrm6_output.c
···
 #include <linux/netfilter_ipv6.h>
 #include <net/dst.h>
 #include <net/ipv6.h>
+#include <net/ip6_route.h>
 #include <net/xfrm.h>

 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
···
 	return xfrm_output(skb);
 }

+static int __xfrm6_output(struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	struct xfrm_state *x = dst->xfrm;
+
+	if ((x && x->props.mode == XFRM_MODE_TUNNEL) &&
+	    ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+	     dst_allfrag(skb_dst(skb)))) {
+		return ip6_fragment(skb, xfrm6_output_finish);
+	}
+	return xfrm6_output_finish(skb);
+}
+
 int xfrm6_output(struct sk_buff *skb)
 {
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
-		       skb_dst(skb)->dev, xfrm6_output_finish);
+		       skb_dst(skb)->dev, __xfrm6_output);
 }
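__xfrm6_output() fragments only tunnel-mode IPsec packets, and only when they exceed the path MTU without being GSO (hardware will segment GSO packets) or when the route requires fragmentation for all packets (dst_allfrag). Stripped of the skb plumbing, the test reduces to a small predicate; a sketch under assumed inputs (plain booleans in place of skb state):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical distillation of the __xfrm6_output() decision. */
static bool must_fragment(bool tunnel_mode, size_t len, size_t mtu,
			  bool is_gso, bool allfrag)
{
	return tunnel_mode && ((len > mtu && !is_gso) || allfrag);
}

int main(void)
{
	printf("%d\n", must_fragment(true, 1500, 1280, false, false)); /* 1: oversized */
	printf("%d\n", must_fragment(true, 9000, 1280, true, false));  /* 0: GSO segments later */
	printf("%d\n", must_fragment(false, 1500, 1280, false, false));/* 0: not tunnel mode */
	return 0;
}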
+11
-7
net/irda/af_irda.c
···

 	switch (optname) {
 	case IRLMP_ENUMDEVICES:
+
+		/* Offset to first device entry */
+		offset = sizeof(struct irda_device_list) -
+			sizeof(struct irda_device_info);
+
+		if (len < offset) {
+			err = -EINVAL;
+			goto out;
+		}
+
 		/* Ask lmp for the current discovery log */
 		discoveries = irlmp_get_discoveries(&list.len, self->mask.word,
 						    self->nslots);
···
 		}

 		/* Write total list length back to client */
-		if (copy_to_user(optval, &list,
-				 sizeof(struct irda_device_list) -
-				 sizeof(struct irda_device_info)))
+		if (copy_to_user(optval, &list, offset))
 			err = -EFAULT;
-
-		/* Offset to first device entry */
-		offset = sizeof(struct irda_device_list) -
-			 sizeof(struct irda_device_info);

 		/* Copy the list itself - watch for overflow */
 		if (list.len > 2048) {
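The IrDA fix computes the header offset first and rejects user buffers shorter than it, so the later copy_to_user() can never write past a short buffer. The shape of the check, sketched in userspace with memcpy() standing in for copy_to_user() and toy structures in place of the IrDA ones:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-ins; only the "fixed header followed by variable entries"
 * size relationship matters here. */
struct dev_info { char name[16]; };
struct dev_list { unsigned len; struct dev_info dev[1]; };

static int enum_devices(void *buf, size_t buflen)
{
	/* Size of the fixed header that precedes the first entry */
	const size_t offset = sizeof(struct dev_list) - sizeof(struct dev_info);
	struct dev_list list = { .len = 0 };

	/* Validate *before* copying anything, as the patch does */
	if (buflen < offset)
		return -EINVAL;

	memcpy(buf, &list, offset);	/* copy_to_user() in the kernel */
	return 0;
}

int main(void)
{
	char big[64], tiny[2];

	printf("%d %d\n", enum_devices(big, sizeof(big)),
	       enum_devices(tiny, sizeof(tiny)));	/* 0 -22 */
	return 0;
}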
+4
net/mac80211/ibss.c
···

 	mutex_lock(&sdata->u.ibss.mtx);

+	if (!sdata->u.ibss.ssid_len)
+		goto mgmt_out; /* not ready to merge yet */
+
 	switch (fc & IEEE80211_FCTL_STYPE) {
 	case IEEE80211_STYPE_PROBE_REQ:
 		ieee80211_rx_mgmt_probe_req(sdata, mgmt, skb->len);
···
 		break;
 	}

+ mgmt_out:
 	mutex_unlock(&sdata->u.ibss.mtx);
 }

+4
-1
net/mac80211/rx.c
···

 	fwd_skb = skb_copy(skb, GFP_ATOMIC);

-	if (!fwd_skb && net_ratelimit())
+	if (!fwd_skb && net_ratelimit()) {
 		printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
 				sdata->name);
+		goto out;
+	}

 	fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
 	memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
···
 		}
 	}

+ out:
 	if (is_multicast_ether_addr(hdr->addr1) ||
 	    sdata->dev->flags & IFF_PROMISC)
 		return RX_CONTINUE;
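Before this change a failed skb_copy() only logged the error and then fell through to dereference the NULL fwd_skb; the added braces and `goto out` skip everything that needs the copy while still letting the local-delivery check at `out:` run. The general shape of the pattern, as a sketch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of the single-exit pattern the patch restores: when an
 * allocation fails, log once and jump past every use of the buffer
 * instead of falling through to a NULL dereference. */
static int forward_frame(const char *frame, size_t len)
{
	char *copy = malloc(len);

	if (!copy) {
		fprintf(stderr, "failed to clone frame\n");
		goto out;
	}
	memcpy(copy, frame, len);
	/* ... rewrite headers, transmit ... */
	free(copy);
out:
	return 0;	/* the caller continues processing either way */
}

int main(void)
{
	return forward_frame("beacon", 6);
}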
+4
-1
net/mac80211/work.c
···
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_work *wk;
+	bool cleanup = false;

 	mutex_lock(&local->mtx);
 	list_for_each_entry(wk, &local->work_list, list) {
 		if (wk->sdata != sdata)
 			continue;
+		cleanup = true;
 		wk->type = IEEE80211_WORK_ABORT;
 		wk->started = true;
 		wk->timeout = jiffies;
···
 	mutex_unlock(&local->mtx);

 	/* run cleanups etc. */
-	ieee80211_work_work(&local->work_work);
+	if (cleanup)
+		ieee80211_work_work(&local->work_work);

 	mutex_lock(&local->mtx);
 	list_for_each_entry(wk, &local->work_list, list) {
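The added `cleanup` flag turns an unconditional pass into a conditional one: ieee80211_work_work() is only kicked if at least one work item was actually flagged for abort. A generic sketch of that dirty-flag pattern (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

struct item { int owner; bool abort; };

/* Hypothetical mirror of the patch: mark matching items, remember
 * whether anything was marked, and only then run the costly sweep. */
static void abort_for_owner(struct item *items, int n, int owner)
{
	bool cleanup = false;

	for (int i = 0; i < n; i++) {
		if (items[i].owner != owner)
			continue;
		items[i].abort = true;
		cleanup = true;
	}

	if (cleanup)
		printf("running cleanup pass\n"); /* ieee80211_work_work() here */
}

int main(void)
{
	struct item items[] = { { 1, false }, { 2, false } };

	abort_for_owner(items, 2, 3);	/* no match: sweep skipped */
	abort_for_owner(items, 2, 1);	/* one match: sweep runs */
	return 0;
}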
+8
-12
net/sched/sch_sfq.c
···
 		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
 		d = q->next[q->tail];
 		q->next[q->tail] = q->next[d];
-		q->allot[q->next[d]] += q->quantum;
 		skb = q->qs[d].prev;
 		len = qdisc_pkt_len(skb);
 		__skb_unlink(skb, &q->qs[d]);
···
 	sfq_inc(q, x);
 	if (q->qs[x].qlen == 1) {		/* The flow is new */
 		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
-			q->tail = x;
 			q->next[x] = x;
-			q->allot[x] = q->quantum;
 		} else {
 			q->next[x] = q->next[q->tail];
 			q->next[q->tail] = x;
-			q->tail = x;
 		}
+		q->tail = x;
+		q->allot[x] = q->quantum;
 	}
 	if (++sch->q.qlen <= q->limit) {
 		sch->bstats.bytes += qdisc_pkt_len(skb);
···
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
-	sfq_index a, old_a;
+	sfq_index a, next_a;

 	/* No active slots */
 	if (q->tail == SFQ_DEPTH)
 		return NULL;

-	a = old_a = q->next[q->tail];
+	a = q->next[q->tail];

 	/* Grab packet */
 	skb = __skb_dequeue(&q->qs[a]);
···
 	/* Is the slot empty? */
 	if (q->qs[a].qlen == 0) {
 		q->ht[q->hash[a]] = SFQ_DEPTH;
-		a = q->next[a];
-		if (a == old_a) {
+		next_a = q->next[a];
+		if (a == next_a) {
 			q->tail = SFQ_DEPTH;
 			return skb;
 		}
-		q->next[q->tail] = a;
-		q->allot[a] += q->quantum;
+		q->next[q->tail] = next_a;
 	} else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) {
-		q->tail = a;
-		a = q->next[a];
 		q->allot[a] += q->quantum;
+		q->tail = a;
 	}
 	return skb;
 }
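SFQ's `allot` is deficit-round-robin credit: an active slot receives `quantum` bytes of credit, pays per dequeued packet, and is refilled and moved to the back of the ring when the credit runs out; the patch consolidates the refill and tail update so new and exhausted slots are handled the same way. A toy model of the accounting (two flows, fixed packet sizes, hypothetical names):

#include <stdio.h>

#define QUANTUM 1514

/* Toy deficit round robin, mimicking how sch_sfq charges q->allot[]
 * and rotates q->tail when a flow's credit is exhausted. */
int main(void)
{
	int allot[2] = { QUANTUM, QUANTUM };
	int pktlen[2] = { 1000, 400 };
	int flow = 0;

	for (int sent = 0; sent < 8; sent++) {
		printf("flow %d sends %d bytes (allot %d)\n",
		       flow, pktlen[flow], allot[flow]);
		allot[flow] -= pktlen[flow];
		if (allot[flow] <= 0) {
			/* credit exhausted: refill and move to ring tail */
			allot[flow] += QUANTUM;
			flow ^= 1;
		}
	}
	return 0;
}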
+1
-1
net/sctp/socket.c
+1
-1
scripts/recordmcount.h
···

 static void fn_ELF_R_INFO(Elf_Rel *const rp, unsigned sym, unsigned type)
 {
-	rp->r_info = ELF_R_INFO(sym, type);
+	rp->r_info = _w(ELF_R_INFO(sym, type));
 }
 static void (*Elf_r_info)(Elf_Rel *const rp, unsigned sym, unsigned type) = fn_ELF_R_INFO;

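recordmcount rewrites ELF objects that may not share the host's byte order, so every multi-byte field must pass through the byte-order wrapper before being stored; the fix routes ELF_R_INFO() through `_w()` like the neighbouring stores. A self-contained sketch of such a wrapper (`_w` here is a hypothetical analogue; the real one is selected per target file):

#include <stdint.h>
#include <stdio.h>

static int target_is_big_endian = 1;	/* would come from the ELF header */

static uint32_t w_swap(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

/* Swap a host-order word into the target object's byte order. */
static uint32_t _w(uint32_t x)
{
	uint32_t probe = 1;
	int host_big = (*(uint8_t *)&probe == 0);

	return host_big == target_is_big_endian ? x : w_swap(x);
}

int main(void)
{
	/* Forgetting the wrapper stores the word with the wrong byte
	 * order on a mismatched host, which is the bug fixed above. */
	printf("0x%08x\n", _w(0x12345678u));
	return 0;
}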
-1
security/keys/request_key.c
+7
-3
sound/core/pcm_lib.c
···
 	struct snd_pcm_hw_rule *new;
 	unsigned int new_rules = constrs->rules_all + 16;
 	new = kcalloc(new_rules, sizeof(*c), GFP_KERNEL);
-	if (!new)
+	if (!new) {
+		va_end(args);
 		return -ENOMEM;
+	}
 	if (constrs->rules) {
 		memcpy(new, constrs->rules,
 		       constrs->rules_num * sizeof(*c));
···
 	c->private = private;
 	k = 0;
 	while (1) {
-		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps)))
+		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
+			va_end(args);
 			return -EINVAL;
+		}
 		c->deps[k++] = dep;
 		if (dep < 0)
 			break;
···
 	constrs->rules_num++;
 	va_end(args);
 	return 0;
-}
+}

 EXPORT_SYMBOL(snd_pcm_hw_rule_add);

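Both added hunks close the same leak: C requires a va_end() to match every va_start() on every exit path, including early error returns. The rule in miniature:

#include <stdarg.h>
#include <stdio.h>

/* Every return after va_start() must be preceded by va_end(); the
 * early error return below mirrors the paths patched here. */
static int sum_until_negative(int first, ...)
{
	va_list args;
	int v, sum = 0;

	va_start(args, first);
	for (v = first; v >= 0; v = va_arg(args, int)) {
		if (sum > 1000) {	/* error path: still needs va_end() */
			va_end(args);
			return -1;
		}
		sum += v;
	}
	va_end(args);			/* normal path */
	return sum;
}

int main(void)
{
	printf("%d\n", sum_until_negative(1, 2, 3, -1)); /* 6 */
	return 0;
}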
+34
-23
sound/pci/hda/hda_codec.c
···
 }
 EXPORT_SYMBOL_HDA(snd_hda_find_mixer_ctl);

+static int find_empty_mixer_ctl_idx(struct hda_codec *codec, const char *name)
+{
+	int idx;
+	for (idx = 0; idx < 16; idx++) { /* 16 ctlrs should be large enough */
+		if (!_snd_hda_find_mixer_ctl(codec, name, idx))
+			return idx;
+	}
+	return -EBUSY;
+}
+
 /**
  * snd_hda_ctl_add - Add a control element and assign to the codec
  * @codec: HD-audio codec
···
 	{ } /* end */
 };

-#define SPDIF_MAX_IDX	4	/* 4 instances should be enough to probe */
-
 /**
  * snd_hda_create_spdif_out_ctls - create Output SPDIF-related controls
  * @codec: the HDA codec
···
 	struct snd_kcontrol_new *dig_mix;
 	int idx;

-	for (idx = 0; idx < SPDIF_MAX_IDX; idx++) {
-		if (!_snd_hda_find_mixer_ctl(codec, "IEC958 Playback Switch",
-					     idx))
-			break;
-	}
-	if (idx >= SPDIF_MAX_IDX) {
+	idx = find_empty_mixer_ctl_idx(codec, "IEC958 Playback Switch");
+	if (idx < 0) {
 		printk(KERN_ERR "hda_codec: too many IEC958 outputs\n");
 		return -EBUSY;
 	}
···
 	struct snd_kcontrol_new *dig_mix;
 	int idx;

-	for (idx = 0; idx < SPDIF_MAX_IDX; idx++) {
-		if (!_snd_hda_find_mixer_ctl(codec, "IEC958 Capture Switch",
-					     idx))
-			break;
-	}
-	if (idx >= SPDIF_MAX_IDX) {
+	idx = find_empty_mixer_ctl_idx(codec, "IEC958 Capture Switch");
+	if (idx < 0) {
 		printk(KERN_ERR "hda_codec: too many IEC958 inputs\n");
 		return -EBUSY;
 	}
···

 	for (; knew->name; knew++) {
 		struct snd_kcontrol *kctl;
+		int addr = 0, idx = 0;
 		if (knew->iface == -1)	/* skip this codec private value */
 			continue;
-		kctl = snd_ctl_new1(knew, codec);
-		if (!kctl)
-			return -ENOMEM;
-		err = snd_hda_ctl_add(codec, 0, kctl);
-		if (err < 0) {
-			if (!codec->addr)
-				return err;
+		for (;;) {
 			kctl = snd_ctl_new1(knew, codec);
 			if (!kctl)
 				return -ENOMEM;
-			kctl->id.device = codec->addr;
+			if (addr > 0)
+				kctl->id.device = addr;
+			if (idx > 0)
+				kctl->id.index = idx;
 			err = snd_hda_ctl_add(codec, 0, kctl);
-			if (err < 0)
+			if (!err)
+				break;
+			/* try first with another device index corresponding to
+			 * the codec addr; if it still fails (or it's the
+			 * primary codec), then try another control index
+			 */
+			if (!addr && codec->addr)
+				addr = codec->addr;
+			else if (!idx && !knew->index) {
+				idx = find_empty_mixer_ctl_idx(codec,
+							       knew->name);
+				if (idx <= 0)
+					return err;
+			} else
 				return err;
 		}
 	}
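find_empty_mixer_ctl_idx() centralises the "probe indices until one is free" loop that the SPDIF paths used to open-code, and the new for(;;) loop retries a failed registration first with the codec address as device number, then with a free control index. A sketch of the helper's shape against a toy registry (hypothetical names):

#include <stdio.h>
#include <string.h>

#define MAX_IDX 16

/* Toy control registry: a name is "taken" at an index once registered. */
static char taken[MAX_IDX][32];

static int ctl_exists(const char *name, int idx)
{
	return strcmp(taken[idx], name) == 0;
}

/* Mirror of find_empty_mixer_ctl_idx(): scan a bounded index space and
 * return the first index where the name is not yet in use. */
static int find_empty_idx(const char *name)
{
	for (int idx = 0; idx < MAX_IDX; idx++)
		if (!ctl_exists(name, idx))
			return idx;
	return -1;	/* -EBUSY in the kernel */
}

int main(void)
{
	strcpy(taken[0], "IEC958 Playback Switch");
	printf("%d\n", find_empty_idx("IEC958 Playback Switch")); /* 1 */
	printf("%d\n", find_empty_idx("Master Playback Volume")); /* 0 */
	return 0;
}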
+39
-25
sound/pci/hda/patch_realtek.c
···

 enum {
 	ALC269_FIXUP_SONY_VAIO,
+	ALC275_FIX_SONY_VAIO_GPIO2,
 	ALC269_FIXUP_DELL_M101Z,
-	ALC269_FIXUP_LENOVO_EDGE14,
+	ALC269_FIXUP_SKU_IGNORE,
 	ALC269_FIXUP_ASUS_G73JW,
 };

···
 			{}
 		}
 	},
+	[ALC275_FIX_SONY_VAIO_GPIO2] = {
+		.verbs = (const struct hda_verb[]) {
+			{0x01, AC_VERB_SET_GPIO_MASK, 0x04},
+			{0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04},
+			{0x01, AC_VERB_SET_GPIO_DATA, 0x00},
+			{ }
+		}
+	},
 	[ALC269_FIXUP_DELL_M101Z] = {
 		.verbs = (const struct hda_verb[]) {
 			/* Enables internal speaker */
···
 			{}
 		}
 	},
-	[ALC269_FIXUP_LENOVO_EDGE14] = {
+	[ALC269_FIXUP_SKU_IGNORE] = {
 		.sku = ALC_FIXUP_SKU_IGNORE,
 	},
 	[ALC269_FIXUP_ASUS_G73JW] = {
···
 };

 static struct snd_pci_quirk alc269_fixup_tbl[] = {
+	SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2),
+	SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2),
+	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2),
 	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
-	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_LENOVO_EDGE14),
+	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
 	{}
 };
···

 	alc_auto_parse_customize_define(codec);

-	coef = alc_read_coef_idx(codec, 0);
-	if ((coef & 0x00f0) == 0x0010) {
-		if (codec->bus->pci->subsystem_vendor == 0x1025 &&
-		    spec->cdefine.platform_type == 1) {
-			alc_codec_rename(codec, "ALC271X");
-			spec->codec_variant = ALC269_TYPE_ALC271X;
-		} else if ((coef & 0xf000) == 0x1000) {
-			spec->codec_variant = ALC269_TYPE_ALC270;
-		} else if ((coef & 0xf000) == 0x2000) {
-			alc_codec_rename(codec, "ALC259");
-			spec->codec_variant = ALC269_TYPE_ALC259;
-		} else if ((coef & 0xf000) == 0x3000) {
-			alc_codec_rename(codec, "ALC258");
-			spec->codec_variant = ALC269_TYPE_ALC258;
-		} else {
-			alc_codec_rename(codec, "ALC269VB");
-			spec->codec_variant = ALC269_TYPE_ALC269VB;
-		}
-	} else
-		alc_fix_pll_init(codec, 0x20, 0x04, 15);
-
-	alc269_fill_coef(codec);
+	if (codec->vendor_id == 0x10ec0269) {
+		coef = alc_read_coef_idx(codec, 0);
+		if ((coef & 0x00f0) == 0x0010) {
+			if (codec->bus->pci->subsystem_vendor == 0x1025 &&
+			    spec->cdefine.platform_type == 1) {
+				alc_codec_rename(codec, "ALC271X");
+				spec->codec_variant = ALC269_TYPE_ALC271X;
+			} else if ((coef & 0xf000) == 0x1000) {
+				spec->codec_variant = ALC269_TYPE_ALC270;
+			} else if ((coef & 0xf000) == 0x2000) {
+				alc_codec_rename(codec, "ALC259");
+				spec->codec_variant = ALC269_TYPE_ALC259;
+			} else if ((coef & 0xf000) == 0x3000) {
+				alc_codec_rename(codec, "ALC258");
+				spec->codec_variant = ALC269_TYPE_ALC258;
+			} else {
+				alc_codec_rename(codec, "ALC269VB");
+				spec->codec_variant = ALC269_TYPE_ALC269VB;
+			}
+		} else
+			alc_fix_pll_init(codec, 0x20, 0x04, 15);
+		alc269_fill_coef(codec);
+	}

 	board_config = snd_hda_check_board_config(codec, ALC269_MODEL_LAST,
 						  alc269_models,
+2
-3
sound/pci/hda/patch_sigmatel.c
···

 		label = hda_get_input_pin_label(codec, nid, 1);
 		snd_hda_add_imux_item(dimux, label, index, &type_idx);
+		if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1)
+			snd_hda_add_imux_item(imux, label, index, &type_idx);

 		err = create_elem_capture_vol(codec, nid, label, type_idx,
 					      HDA_INPUT);
···
 			if (err < 0)
 				return err;
 		}
-
-		if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1)
-			snd_hda_add_imux_item(imux, label, index, NULL);
 	}

 	return 0;
+1
-2
tools/perf/builtin-buildid-list.c
···

 static int __cmd_buildid_list(void)
 {
-	int err = -1;
 	struct perf_session *session;

 	session = perf_session__new(input_name, O_RDONLY, force, false);
···
 	perf_session__fprintf_dsos_buildid(session, stdout, with_hits);

 	perf_session__delete(session);
-	return err;
+	return 0;
 }

 int cmd_buildid_list(int argc, const char **argv, const char *prefix __used)
+5
tools/perf/builtin-probe.c
···
 	    !params.show_lines))
 		usage_with_options(probe_usage, options);

+	/*
+	 * Only consider the user's kernel image path if given.
+	 */
+	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
+
 	if (params.list_events) {
 		if (params.mod_events) {
 			pr_err("  Error: Don't use --list with --add/--del.\n");
+6
-4
tools/perf/util/header.c
···
 			  const char *name, bool is_kallsyms)
 {
 	const size_t size = PATH_MAX;
-	char *filename = malloc(size),
+	char *realname = realpath(name, NULL),
+	     *filename = malloc(size),
 	     *linkname = malloc(size), *targetname;
 	int len, err = -1;

-	if (filename == NULL || linkname == NULL)
+	if (realname == NULL || filename == NULL || linkname == NULL)
 		goto out_free;

 	len = snprintf(filename, size, "%s%s%s",
-		       debugdir, is_kallsyms ? "/" : "", name);
+		       debugdir, is_kallsyms ? "/" : "", realname);
 	if (mkdir_p(filename, 0755))
 		goto out_free;

···
 		if (is_kallsyms) {
 			if (copyfile("/proc/kallsyms", filename))
 				goto out_free;
-		} else if (link(name, filename) && copyfile(name, filename))
+		} else if (link(realname, filename) && copyfile(name, filename))
 			goto out_free;
 	}

···
 	if (symlink(targetname, linkname) == 0)
 		err = 0;
 out_free:
+	free(realname);
 	free(filename);
 	free(linkname);
 	return err;
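Canonicalising with realpath(name, NULL) means symlinked DSOs land in the build-id cache under their resolved path, and link() then hard-links the real file rather than the symlink. Note that realpath() with a NULL buffer allocates the result, which is why free(realname) joins the shared exit path. The call in isolation:

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *name = argc > 1 ? argv[1] : "/proc/self/exe";
	/* With a NULL buffer, realpath() allocates the canonical path
	 * and the caller owns it -- hence free() on every exit path. */
	char *realname = realpath(name, NULL);

	if (realname == NULL) {
		perror("realpath");
		return 1;
	}
	printf("%s -> %s\n", name, realname);
	free(realname);
	return 0;
}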
+12
-3
tools/perf/util/probe-event.c
···
 const char *kernel_get_module_path(const char *module)
 {
 	struct dso *dso;
+	struct map *map;
+	const char *vmlinux_name;

 	if (module) {
 		list_for_each_entry(dso, &machine.kernel_dsos, node) {
···
 		}
 		pr_debug("Failed to find module %s.\n", module);
 		return NULL;
+	}
+
+	map = machine.vmlinux_maps[MAP__FUNCTION];
+	dso = map->dso;
+
+	vmlinux_name = symbol_conf.vmlinux_name;
+	if (vmlinux_name) {
+		if (dso__load_vmlinux(dso, map, vmlinux_name, NULL) <= 0)
+			return NULL;
 	} else {
-		dso = machine.vmlinux_maps[MAP__FUNCTION]->dso;
-		if (dso__load_vmlinux_path(dso,
-			machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) {
+		if (dso__load_vmlinux_path(dso, map, NULL) <= 0) {
 			pr_debug("Failed to load kernel map.\n");
 			return NULL;
 		}
+55
-30
tools/perf/util/probe-finder.c
···
 }

 /* Dwarf FL wrappers */
-
-static int __linux_kernel_find_elf(Dwfl_Module *mod,
-				   void **userdata,
-				   const char *module_name,
-				   Dwarf_Addr base,
-				   char **file_name, Elf **elfp)
-{
-	int fd;
-	const char *path = kernel_get_module_path(module_name);
-
-	if (path) {
-		fd = open(path, O_RDONLY);
-		if (fd >= 0) {
-			*file_name = strdup(path);
-			return fd;
-		}
-	}
-	/* If failed, try to call standard method */
-	return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base,
-					  file_name, elfp);
-}
-
 static char *debuginfo_path;	/* Currently dummy */

 static const Dwfl_Callbacks offline_callbacks = {
···

 	/* We use this table for core files too. */
 	.find_elf = dwfl_build_id_find_elf,
-};
-
-static const Dwfl_Callbacks kernel_callbacks = {
-	.find_debuginfo = dwfl_standard_find_debuginfo,
-	.debuginfo_path = &debuginfo_path,
-
-	.find_elf = __linux_kernel_find_elf,
-	.section_address = dwfl_linux_kernel_module_section_address,
 };

 /* Get a Dwarf from offline image */
···
 	return dbg;
 }

+#if _ELFUTILS_PREREQ(0, 148)
+/* This method is buggy if elfutils is older than 0.148 */
+static int __linux_kernel_find_elf(Dwfl_Module *mod,
+				   void **userdata,
+				   const char *module_name,
+				   Dwarf_Addr base,
+				   char **file_name, Elf **elfp)
+{
+	int fd;
+	const char *path = kernel_get_module_path(module_name);
+
+	pr_debug2("Use file %s for %s\n", path, module_name);
+	if (path) {
+		fd = open(path, O_RDONLY);
+		if (fd >= 0) {
+			*file_name = strdup(path);
+			return fd;
+		}
+	}
+	/* If failed, try to call standard method */
+	return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base,
+					  file_name, elfp);
+}
+
+static const Dwfl_Callbacks kernel_callbacks = {
+	.find_debuginfo = dwfl_standard_find_debuginfo,
+	.debuginfo_path = &debuginfo_path,
+
+	.find_elf = __linux_kernel_find_elf,
+	.section_address = dwfl_linux_kernel_module_section_address,
+};
+
 /* Get a Dwarf from live kernel image */
 static Dwarf *dwfl_init_live_kernel_dwarf(Dwarf_Addr addr, Dwfl **dwflp,
 					  Dwarf_Addr *bias)
···
 	dbg = dwfl_addrdwarf(*dwflp, addr, bias);
 	/* Here, check whether we could get a real dwarf */
 	if (!dbg) {
+		pr_debug("Failed to find kernel dwarf at %lx\n",
+			 (unsigned long)addr);
 		dwfl_end(*dwflp);
 		*dwflp = NULL;
 	}
 	return dbg;
 }
+#else
+/* With older elfutils, this just support kernel module... */
+static Dwarf *dwfl_init_live_kernel_dwarf(Dwarf_Addr addr __used, Dwfl **dwflp,
+					  Dwarf_Addr *bias)
+{
+	int fd;
+	const char *path = kernel_get_module_path("kernel");
+
+	if (!path) {
+		pr_err("Failed to find vmlinux path\n");
+		return NULL;
+	}
+
+	pr_debug2("Use file %s for debuginfo\n", path);
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return NULL;
+
+	return dwfl_init_offline_dwarf(fd, dwflp, bias);
+}
+#endif

 /* Dwarf wrappers */

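Gating on _ELFUTILS_PREREQ(0, 148) compiles one of two implementations of the same entry point: the dwfl-based lookup on new-enough elfutils, or a fallback that opens the vmlinux image directly. The pattern reduced to a standalone example (FEATURE_PREREQ and LIB_VERSION are hypothetical stand-ins for the library's own version macro):

#include <stdio.h>

/* Hypothetical stand-in for a library version check such as
 * _ELFUTILS_PREREQ(major, minor); real code would use the macro
 * exported by the library's headers. */
#define LIB_VERSION 147
#define FEATURE_PREREQ(m) (LIB_VERSION >= (m))

#if FEATURE_PREREQ(148)
static const char *init_debug_source(void)
{
	return "full dwfl-based lookup";
}
#else
/* Older library: fall back to opening the image file directly,
 * as the #else branch of the patch does for vmlinux. */
static const char *init_debug_source(void)
{
	return "fallback: open vmlinux directly";
}
#endif

int main(void)
{
	puts(init_debug_source());
	return 0;
}

Keeping both definitions behind one name means every caller stays unchanged; only the compile-time predicate selects which body is built.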
+1
-1
tools/perf/util/string.c
+2
-2
tools/perf/util/symbol.c
···
 	return -1;
 }

-static int dso__load_vmlinux(struct dso *self, struct map *map,
-			     const char *vmlinux, symbol_filter_t filter)
+int dso__load_vmlinux(struct dso *self, struct map *map,
+		      const char *vmlinux, symbol_filter_t filter)
 {
 	int err = -1, fd;

+2
tools/perf/util/symbol.h
···
 struct dso *__dsos__findnew(struct list_head *head, const char *name);

 int dso__load(struct dso *self, struct map *map, symbol_filter_t filter);
+int dso__load_vmlinux(struct dso *self, struct map *map,
+		      const char *vmlinux, symbol_filter_t filter);
 int dso__load_vmlinux_path(struct dso *self, struct map *map,
 			   symbol_filter_t filter);
 int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map,