tjh.dev / kernel
Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
tags: kernel · os · linux
Merge git://oak/home/sfr/kernels/iseries/work
Paul Mackerras · 20 years ago
0c95fbb2 · 49b09853
3 changed files, +181 -4
arch/powerpc/kernel/prom.c · +3 -3 · reviewed

@@ -1080,9 +1080,9 @@
 static int __init early_init_dt_scan_cpus(unsigned long node,
 					  const char *uname, int depth, void *data)
 {
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 	u32 *prop;
-	unsigned long size = 0;
+	unsigned long size;
+	char *type = of_get_flat_dt_prop(node, "device_type", &size);
 
 	/* We are scanning "cpu" nodes only */
 	if (type == NULL || strcmp(type, "cpu") != 0)
@@ -1108,7 +1108,7 @@
 
 #ifdef CONFIG_ALTIVEC
 	/* Check if we have a VMX and eventually update CPU features */
-	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", &size);
+	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
 	if (prop && (*prop) > 0) {
 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
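Note: this hunk moves the length request from the "ibm,vmx" lookup to the "device_type" lookup. A minimal sketch of the calling convention, assuming the 2005-era flattened-device-tree API in which the third argument, when non-NULL, receives the property length in bytes:

	unsigned long size;
	/* length of "device_type" is wanted, so pass &size */
	char *type = of_get_flat_dt_prop(node, "device_type", &size);
	/* length of "ibm,vmx" is unused, so pass NULL */
	u32 *vmx = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);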
arch/powerpc/platforms/iseries/setup.c · -1 · reviewed

@@ -704,7 +704,6 @@
 
 static void iseries_dedicated_idle(void)
 {
-	long oldval;
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	while (1) {
include/asm-powerpc/atomic.h · +178 · reviewed

@@ -197,5 +197,183 @@
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#ifdef __powerpc64__
+
+typedef struct { volatile long counter; } atomic64_t;
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+#define atomic64_read(v)	((v)->counter)
+#define atomic64_set(v,i)	(((v)->counter) = (i))
+
+static __inline__ void atomic64_add(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# atomic64_add\n\
+	add	%0,%2,%0\n\
+	stdcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_add_return(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
+	add	%0,%1,%0\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+
+static __inline__ void atomic64_sub(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# atomic64_sub\n\
+	subf	%0,%2,%0\n\
+	stdcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
+	subf	%0,%1,%0\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+static __inline__ void atomic64_inc(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_inc\n\
+	addic	%0,%0,1\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_inc_return(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
+	addic	%0,%0,1\n\
+	stdcx.	%0,0,%1 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+/*
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+static __inline__ void atomic64_dec(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_dec\n\
+	addic	%0,%0,-1\n\
+	stdcx.	%0,0,%2\n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_dec_return(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
+	addic	%0,%0,-1\n\
+	stdcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
+	addic.	%0,%0,-1\n\
+	blt-	2f\n\
+	stdcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	"\n\
+2:"	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#endif /* __powerpc64__ */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_ATOMIC_H_ */
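Note: the added API mirrors the existing 32-bit atomic_t interface; the non-returning operations carry no memory barriers, while the value-returning variants are bracketed by EIEIO_ON_SMP/ISYNC_ON_SMP, matching the atomic_t barrier conventions of this era. A minimal usage sketch (hypothetical kernel code, powerpc64 only; the counter and functions below are illustrative, not part of the patch):

	/* hypothetical: a 64-bit event counter built on the API added above */
	static atomic64_t hits = ATOMIC64_INIT(0);

	static void count_hit(void)
	{
		atomic64_inc(&hits);		/* lock-free increment via ldarx/stdcx. */
	}

	static long drain_hits(void)
	{
		long n = atomic64_read(&hits);
		atomic64_sub(n, &hits);		/* subtract only the snapshot we took */
		return n;
	}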