Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://oak/home/sfr/kernels/iseries/work

3 files changed, 181 insertions(+), 4 deletions(-)

arch/powerpc/kernel/prom.c | +3 -3
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -1080,9 +1080,9 @@
 static int __init early_init_dt_scan_cpus(unsigned long node,
                                           const char *uname, int depth, void *data)
 {
-        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
         u32 *prop;
-        unsigned long size = 0;
+        unsigned long size;
+        char *type = of_get_flat_dt_prop(node, "device_type", &size);
 
         /* We are scanning "cpu" nodes only */
         if (type == NULL || strcmp(type, "cpu") != 0)
@@ -1108,7 +1108,7 @@
 
 #ifdef CONFIG_ALTIVEC
         /* Check if we have a VMX and eventually update CPU features */
-        prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", &size);
+        prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
         if (prop && (*prop) > 0) {
                 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
                 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
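The point of this hunk is the third argument of of_get_flat_dt_prop(): when it is non-NULL, the property length is written through it; NULL means the caller only wants the value. The patch moves the &size out-parameter onto the "device_type" lookup and passes NULL for "ibm,vmx", whose length was never used. Below is a standalone sketch of that out-parameter convention; find_prop() and its table are hypothetical stand-ins for the kernel's flattened-device-tree walker, kept here only to show the NULL-vs-&size calling pattern.

/* Sketch of the "optional size out-parameter" lookup convention. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct prop { const char *name; const void *value; unsigned long len; };

/* Hypothetical lookup over a fixed table instead of a flattened blob. */
static const void *find_prop(const struct prop *table, size_t n,
                             const char *name, unsigned long *size)
{
    for (size_t i = 0; i < n; i++) {
        if (strcmp(table[i].name, name) == 0) {
            if (size)                 /* only written through when asked */
                *size = table[i].len;
            return table[i].value;
        }
    }
    return NULL;
}

int main(void)
{
    static const unsigned int vmx = 1;
    const struct prop node[] = {
        { "device_type", "cpu", 4 },
        { "ibm,vmx",     &vmx,  sizeof(vmx) },
    };
    unsigned long size = 0;

    /* Mirrors the patched code: take the length from "device_type"... */
    const char *type = find_prop(node, 2, "device_type", &size);
    /* ...and pass NULL where the length is not needed. */
    const unsigned int *prop = find_prop(node, 2, "ibm,vmx", NULL);

    printf("type=%s len=%lu vmx=%u\n", type, size, prop ? *prop : 0);
    return 0;
}

Note that the patched kernel code can leave size uninitialized: it is only meaningful when type != NULL, which the early return in the context lines guarantees before size could be consumed.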
arch/powerpc/platforms/iseries/setup.c | -1
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -704,7 +704,6 @@
 
 static void iseries_dedicated_idle(void)
 {
-        long oldval;
         set_thread_flag(TIF_POLLING_NRFLAG);
 
         while (1) {
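A pure dead-code removal: oldval is never read in this version of the loop. For reference, the surviving lines describe a polling idle loop: advertise polling once via TIF_POLLING_NRFLAG, then spin until there is work. The sketch below is a hypothetical userspace stand-in for that shape; the names are illustrative stubs, not kernel APIs, and the real loop also does platform work this omits.

/* Hypothetical userspace stand-in for the polling-idle shape above. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool work_pending;              /* stands in for the resched flag   */
static atomic_bool polling;                   /* stands in for TIF_POLLING_NRFLAG */

static bool need_resched(void) { return atomic_load(&work_pending); }

void dedicated_idle(void)                     /* never returns, like the original */
{
    atomic_store(&polling, true);             /* set once, before the loop */

    while (1) {
        while (!need_resched())
            ;                                 /* busy-poll until work shows up */
        atomic_store(&work_pending, false);   /* the kernel would schedule() here */
    }
}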
include/asm-powerpc/atomic.h | +178
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -197,5 +197,183 @@
 #define smp_mb__before_atomic_inc()     smp_mb()
 #define smp_mb__after_atomic_inc()      smp_mb()
 
+#ifdef __powerpc64__
+
+typedef struct { volatile long counter; } atomic64_t;
+
+#define ATOMIC64_INIT(i)        { (i) }
+
+#define atomic64_read(v)        ((v)->counter)
+#define atomic64_set(v,i)       (((v)->counter) = (i))
+
+static __inline__ void atomic64_add(long a, atomic64_t *v)
+{
+        long t;
+
+        __asm__ __volatile__(
+"1:     ldarx   %0,0,%3         # atomic64_add\n\
+        add     %0,%2,%0\n\
+        stdcx.  %0,0,%3\n\
+        bne-    1b"
+        : "=&r" (t), "=m" (v->counter)
+        : "r" (a), "r" (&v->counter), "m" (v->counter)
+        : "cc");
+}
+
+static __inline__ long atomic64_add_return(long a, atomic64_t *v)
+{
+        long t;
+
+        __asm__ __volatile__(
+        EIEIO_ON_SMP
+"1:     ldarx   %0,0,%2         # atomic64_add_return\n\
+        add     %0,%1,%0\n\
+        stdcx.  %0,0,%2\n\
+        bne-    1b"
+        ISYNC_ON_SMP
+        : "=&r" (t)
+        : "r" (a), "r" (&v->counter)
+        : "cc", "memory");
+
+        return t;
+}
+
+#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)
+
+static __inline__ void atomic64_sub(long a, atomic64_t *v)
+{
+        long t;
+
+        __asm__ __volatile__(
+"1:     ldarx   %0,0,%3         # atomic64_sub\n\
+        subf    %0,%2,%0\n\
+        stdcx.  %0,0,%3\n\
+        bne-    1b"
+        : "=&r" (t), "=m" (v->counter)
+        : "r" (a), "r" (&v->counter), "m" (v->counter)
+        : "cc");
+}
+
+static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
+{
+        long t;
+
+        __asm__ __volatile__(
+        EIEIO_ON_SMP
+"1:     ldarx   %0,0,%2         # atomic64_sub_return\n\
+        subf    %0,%1,%0\n\
+        stdcx.  %0,0,%2\n\
+        bne-    1b"
+        ISYNC_ON_SMP
+        : "=&r" (t)
+        : "r" (a), "r" (&v->counter)
+        : "cc", "memory");
+
+        return t;
+}
+
+static __inline__ void atomic64_inc(atomic64_t *v)
+{
+        long t;
+
+        __asm__ __volatile__(
+"1:     ldarx   %0,0,%2         # atomic64_inc\n\
+        addic   %0,%0,1\n\
+        stdcx.  %0,0,%2\n\
+        bne-    1b"
+        : "=&r" (t), "=m" (v->counter)
+        : "r" (&v->counter), "m" (v->counter)
+        : "cc");
+}
+
+static __inline__ long atomic64_inc_return(atomic64_t *v)
+{
+        long t;
+
+        __asm__ __volatile__(
+        EIEIO_ON_SMP
+"1:     ldarx   %0,0,%1         # atomic64_inc_return\n\
+        addic   %0,%0,1\n\
+        stdcx.  %0,0,%1\n\
+        bne-    1b"
+        ISYNC_ON_SMP
+        : "=&r" (t)
+        : "r" (&v->counter)
+        : "cc", "memory");
+
+        return t;
+}
+
+/*
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+static __inline__ void atomic64_dec(atomic64_t *v)
+{
+        long t;
+
+        __asm__ __volatile__(
+"1:     ldarx   %0,0,%2         # atomic64_dec\n\
+        addic   %0,%0,-1\n\
+        stdcx.  %0,0,%2\n\
+        bne-    1b"
+        : "=&r" (t), "=m" (v->counter)
+        : "r" (&v->counter), "m" (v->counter)
+        : "cc");
+}
+
+static __inline__ long atomic64_dec_return(atomic64_t *v)
+{
+        long t;
+
+        __asm__ __volatile__(
+        EIEIO_ON_SMP
+"1:     ldarx   %0,0,%1         # atomic64_dec_return\n\
+        addic   %0,%0,-1\n\
+        stdcx.  %0,0,%1\n\
+        bne-    1b"
+        ISYNC_ON_SMP
+        : "=&r" (t)
+        : "r" (&v->counter)
+        : "cc", "memory");
+
+        return t;
+}
+
+#define atomic64_sub_and_test(a, v)     (atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+{
+        long t;
+
+        __asm__ __volatile__(
+        EIEIO_ON_SMP
+"1:     ldarx   %0,0,%1         # atomic64_dec_if_positive\n\
+        addic.  %0,%0,-1\n\
+        blt-    2f\n\
+        stdcx.  %0,0,%1\n\
+        bne-    1b"
+        ISYNC_ON_SMP
+        "\n\
+2:"     : "=&r" (t)
+        : "r" (&v->counter)
+        : "cc", "memory");
+
+        return t;
+}
+
+#endif /* __powerpc64__ */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_ATOMIC_H_ */
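Every primitive here is the same load-reserve/store-conditional pattern: ldarx loads the doubleword and takes a reservation on it, the arithmetic runs on the loaded value, and stdcx. stores back only if the reservation is still intact, setting CR0 so that bne- 1b retries after any intervening store to the line. The value-returning variants additionally wrap the loop in EIEIO_ON_SMP before and ISYNC_ON_SMP after, which is what gives them their ordering guarantees on SMP; the void add/sub/inc/dec versions deliberately skip the barriers. A portable sketch of the same retry loop, using a C11 compare-exchange purely for illustration (an assumption on my part: this kernel code predates C11, and mapping the eieio/isync pair onto seq_cst ordering is only an approximation):

/* Sketch: the ldarx/stdcx. retry loop expressed as C11 compare-exchange. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { _Atomic long counter; } atomic64_sketch_t;

static long atomic64_add_return_sketch(long a, atomic64_sketch_t *v)
{
    long old = atomic_load_explicit(&v->counter, memory_order_relaxed);
    long new;

    do {                    /* retry on interference, like bne- 1b above */
        new = old + a;      /* recompute from the refreshed old value */
    } while (!atomic_compare_exchange_weak(&v->counter, &old, new));

    return new;             /* the asm likewise returns the updated value */
}

int main(void)
{
    atomic64_sketch_t v = { 40 };
    printf("%ld\n", atomic64_add_return_sketch(2, &v));   /* prints 42 */
    return 0;
}

Note how the diff derives atomic64_inc_and_test(), atomic64_dec_and_test(), atomic64_sub_and_test() and atomic64_add_negative() from the *_return primitives by testing the returned value rather than writing more assembly; the same trick applies to the sketch above.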