[parisc-linux-cvs] Re: 2.4.19-pa16 DIFF fix circular dependency
Grant Grundler
grundler@dsl2.external.hp.com
Fri, 13 Sep 2002 17:11:34 -0600
Grant Grundler wrote:
> Log message:
> 2.4.19-pa16
> Remove circular dependency between spinlocks, atomic.h, and system.h
> Processor.h changes were accidentally committed with -pa15.
> Fix missing includes in include/linux/fs_struct.h.
This adds the missing <asm/spinlock_t.h> header that -pa15 needed in order to
build with CONFIG_SMP.
I've posted this diff before and basically got no feedback on it.
It works for me. The most questionable part is the change
in fs_struct.h.
Index: include/asm-parisc/atomic.h
===================================================================
RCS file: /var/cvs/linux/include/asm-parisc/atomic.h,v
retrieving revision 1.9
diff -u -p -r1.9 atomic.h
--- include/asm-parisc/atomic.h 21 Mar 2002 15:38:24 -0000 1.9
+++ include/asm-parisc/atomic.h 13 Sep 2002 16:24:20 -0000
@@ -13,8 +13,9 @@
* And probably incredibly slow on parisc. OTOH, we don't
* have to write any serious assembly. prumpf
*/
-
#ifdef CONFIG_SMP
+#include <asm/spinlock_t.h>
+
/* Use an array of spinlocks for our atomic_ts.
** Hash function to index into a different SPINLOCK.
** Since "a" is usually an address, ">>8" makes one spinlock per 64-bytes.
@@ -23,25 +24,33 @@
# define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long) a)>>8)&(ATOMIC_HASH_SIZE-1)])
extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
-/* copied from <asm/spinlock.h> and modified */
-# define SPIN_LOCK(x) \
- do { while(__ldcw(&(x)->lock) == 0); } while(0)
-
-# define SPIN_UNLOCK(x) \
- do { (x)->lock = 1; } while(0)
-#else
-# define ATOMIC_HASH_SIZE 1
-# define ATOMIC_HASH(a) (0)
-/* copied from <linux/spinlock.h> and modified */
-# define SPIN_LOCK(x) (void)(x)
-
-# define SPIN_UNLOCK(x) do { } while(0)
-#endif
+/* copied from <asm/spinlock.h> and modified.
+ * No CONFIG_DEBUG_SPINLOCK support.
+ *
+ * XXX REVISIT these could be renamed and moved to spinlock_t.h as well
+ */
+#define SPIN_LOCK(x) do { while(__ldcw(&(x)->lock) == 0); } while(0)
+#define SPIN_UNLOCK(x) do { (x)->lock = 1; } while(0)
+
+#else /* CONFIG_SMP */
+
+#define ATOMIC_HASH_SIZE 1
+#define ATOMIC_HASH(a) (0)
+
+#define SPIN_LOCK(x) (void)(x)
+#define SPIN_UNLOCK(x) do { } while(0)
+
+#endif /* CONFIG_SMP */
/* copied from <linux/spinlock.h> and modified */
-#define SPIN_LOCK_IRQSAVE(lock, flags) do { local_irq_save(flags); SPIN_LOCK(lock); } while (0)
-#define SPIN_UNLOCK_IRQRESTORE(lock, flags) do { SPIN_UNLOCK(lock); local_irq_restore(flags); } while (0)
+#define SPIN_LOCK_IRQSAVE(lock, flags) do { \
+ local_irq_save(flags); SPIN_LOCK(lock); \
+} while (0)
+
+#define SPIN_UNLOCK_IRQRESTORE(lock, flags) do { \
+ SPIN_UNLOCK(lock); local_irq_restore(flags); \
+} while (0)
/* Note that we need not lock read accesses - aligned word writes/reads
* are atomic, so a reader never sees unconsistent values.
@@ -54,22 +63,10 @@ typedef struct {
} atomic_t;
-/*
-** xchg/cmpxchg moved from asm/system.h - ggg
-*/
-
-#if 1
/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);
-#else
-static inline void __xchg_called_with_bad_pointer(void)
-{
- extern void panic(const char * fmt, ...);
- panic("xchg called with bad pointer");
-}
-#endif
/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
@@ -98,7 +95,7 @@ static __inline__ unsigned long __xchg(u
/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
-** o and while we are at it, could __LP64__ code use LDCD too?
+** o and while we are at it, could 64-bit code use LDCD too?
**
** if (__builtin_constant_p(x) && (x == NULL))
** if (((unsigned long)p & 0xf) == 0)
Index: include/asm-parisc/spinlock.h
===================================================================
RCS file: /var/cvs/linux/include/asm-parisc/spinlock.h,v
retrieving revision 1.11
diff -u -p -r1.11 spinlock.h
--- include/asm-parisc/spinlock.h 10 Nov 2001 01:00:39 -0000 1.11
+++ include/asm-parisc/spinlock.h 13 Sep 2002 16:24:20 -0000
@@ -1,35 +1,9 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
-#include <asm/system.h>
-
-/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
- * since it only has load-and-zero.
- */
-
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
-
-#define spin_lock_init(x) do { (x)->lock = 1; } while(0)
-
-#define spin_is_locked(x) ((x)->lock == 0)
-
-#define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock == 0)
-
-#if 1
-#define spin_lock(x) do { \
- while (__ldcw (&(x)->lock) == 0) \
- while (((x)->lock) == 0) ; } while (0)
-
-#else
-#define spin_lock(x) \
- do { while(__ldcw(&(x)->lock) == 0); } while(0)
-#endif
-
-#define spin_unlock(x) \
- do { (x)->lock = 1; } while(0)
-
-#define spin_trylock(x) (__ldcw(&(x)->lock) != 0)
+#include <asm/spinlock_t.h> /* get spinlock primitives */
+#include <asm/psw.h> /* local_* primitives need PSW_I */
+#include <asm/system_irqsave.h> /* get local_* primitives */
/*
* Read-write spinlocks, allowing multiple readers
Index: include/asm-parisc/spinlock_t.h
===================================================================
RCS file: include/asm-parisc/spinlock_t.h
diff -N include/asm-parisc/spinlock_t.h
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ include/asm-parisc/spinlock_t.h 13 Sep 2002 16:24:20 -0000
@@ -0,0 +1,40 @@
+#ifndef __PARISC_SPINLOCK_T_H
+#define __PARISC_SPINLOCK_T_H
+
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
+ *
+ * Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
+ * since it only has load-and-zero.
+ */
+#define __ldcw(a) ({ \
+ unsigned __ret; \
+ __asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
+ __ret; \
+})
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+typedef struct {
+ volatile unsigned int __attribute__((aligned(16))) lock;
+} spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
+
+
+/* Define 6 spinlock primitives that don't depend on anything else. */
+
+#define spin_lock_init(x) do { (x)->lock = 1; } while(0)
+#define spin_is_locked(x) ((x)->lock == 0)
+#define spin_trylock(x) (__ldcw(&(x)->lock) != 0)
+#define spin_unlock(x) do { (x)->lock = 1; } while(0)
+
+#define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock == 0)
+
+#define spin_lock(x) do { \
+ while (__ldcw (&(x)->lock) == 0) \
+ while ((x)->lock == 0) ; \
+} while (0)
+
+#endif /* __PARISC_SPINLOCK_T_H */
Index: include/asm-parisc/system.h
===================================================================
RCS file: /var/cvs/linux/include/asm-parisc/system.h,v
retrieving revision 1.15
diff -u -p -r1.15 system.h
--- include/asm-parisc/system.h 1 Aug 2001 16:43:05 -0000 1.15
+++ include/asm-parisc/system.h 13 Sep 2002 16:24:20 -0000
@@ -3,6 +3,11 @@
#include <linux/config.h>
#include <asm/psw.h>
+#include <asm/system_irqsave.h>
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock_t.h>
+#endif
/* The program status word as bitfields. */
struct pa_psw {
@@ -51,20 +56,6 @@ extern struct task_struct *_switch_to(st
} while(0)
-
-/* interrupt control */
-#define __save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
-#define __restore_flags(x) __asm__ __volatile__("mtsm %0" : : "r" (x) : "memory")
-#define __cli() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
-#define __sti() __asm__ __volatile__("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
-
-#define local_irq_save(x) \
- __asm__ __volatile__("rsm %1,%0" : "=r" (x) :"i" (PSW_I) : "memory" )
-#define local_irq_restore(x) \
- __asm__ __volatile__("mtsm %0" : : "r" (x) : "memory" )
-#define local_irq_disable() __cli()
-#define local_irq_enable() __sti()
-
#ifdef CONFIG_SMP
extern void __global_cli(void);
extern void __global_sti(void);
@@ -150,24 +141,5 @@ static inline void set_eiem(unsigned lon
#define smp_wmb() mb()
#define set_mb(var, value) do { var = value; mb(); } while (0)
-
-
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
-#define __ldcw(a) ({ \
- unsigned __ret; \
- __asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
- __ret; \
-})
-
-
-#ifdef CONFIG_SMP
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
- volatile unsigned int __attribute__((aligned(16))) lock;
-} spinlock_t;
-#endif
#endif
Index: include/asm-parisc/system_irqsave.h
===================================================================
RCS file: include/asm-parisc/system_irqsave.h
diff -N include/asm-parisc/system_irqsave.h
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ include/asm-parisc/system_irqsave.h 13 Sep 2002 16:24:20 -0000
@@ -0,0 +1,17 @@
+#ifndef __PARISC_SYSTEM_IRQSAVE_H
+#define __PARISC_SYSTEM_IRQSAVE_H
+
+/* interrupt control */
+#define __save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
+#define __restore_flags(x) __asm__ __volatile__("mtsm %0" : : "r" (x) : "memory")
+#define __cli() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
+#define __sti() __asm__ __volatile__("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
+
+#define local_irq_save(x) \
+ __asm__ __volatile__("rsm %1,%0" : "=r" (x) :"i" (PSW_I) : "memory" )
+#define local_irq_restore(x) \
+ __asm__ __volatile__("mtsm %0" : : "r" (x) : "memory" )
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
+
+#endif /* __PARISC_SYSTEM_IRQSAVE_H */
Index: include/linux/fs_struct.h
===================================================================
RCS file: /var/cvs/linux/include/linux/fs_struct.h,v
retrieving revision 1.4
diff -u -p -r1.4 fs_struct.h
--- include/linux/fs_struct.h 1 Aug 2001 15:27:23 -0000 1.4
+++ include/linux/fs_struct.h 13 Sep 2002 16:24:21 -0000
@@ -2,6 +2,13 @@
#define _LINUX_FS_STRUCT_H
#ifdef __KERNEL__
+#include <linux/config.h>
+
+#include <asm/atomic.h>
+#ifdef CONFIG_SMP
+#include <asm/spinlock.h>
+#endif
+
struct fs_struct {
atomic_t count;
rwlock_t lock;