[parisc-linux-cvs] (no subject)

Matthew Wilcox <willy@ldl.fc.hp.com>
Fri, 23 Mar 2001 00:08:21 -0700


This patch tells glibc how to handle stacks which grow upwards.
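
On hppa the thread descriptor sits at the bottom of each STACK_SIZE-aligned
region and the stack grows up from just above it, with the guard area at the
top; the usual downward layout is the mirror image.  Roughly, this is the
layout pthread_allocate_stack sets up in the STACK_GROWS_UP case (region_base
is shorthand here; the other names are from the manager.c hunk below):

    /* region_base = default_new_thread rounded down to a STACK_SIZE boundary */
    new_thread        = region_base;                        /* thread descriptor  */
    new_thread_bottom = region_base + sizeof(*new_thread);  /* stack starts here  */
    guardaddr         = region_base + stacksize;            /* guard area at top  */

and thread_self() recovers the descriptor by masking the stack pointer down to
that boundary instead of rounding it up.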

It also brings in the new, improved spinlock implementation from the kernel,
which avoids issuing ldcw while spinning.
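
The spinlock change is the usual test-and-test-and-set trick.  ldcw
(load-and-clear-word) atomically returns the old value of the lock word and
leaves zero behind, and a free lock holds a nonzero value, so spinning
directly on ldcw writes the lock word on every iteration.  The new loop only
retries ldcw once a plain read says the lock looks free again; in outline
(using the __ldcw macro the patch adds to pspinlock.c):

    while (__ldcw (*lock) == 0)    /* failed to grab the lock...            */
        while (*lock == 0)         /* ...so spin with plain reads until it  */
            ;                      /*    looks free, then try ldcw again    */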

Index: linuxthreads/internals.h
===================================================================
RCS file: /home/cvs/parisc/glibc/linuxthreads/internals.h,v
retrieving revision 1.5
diff -u -p -r1.5 internals.h
--- internals.h	2001/01/29 14:39:10	1.5
+++ internals.h	2001/03/23 06:56:08
@@ -379,8 +379,12 @@ static inline pthread_descr thread_self 
   else if (__pthread_nonstandard_stacks)
     return __pthread_find_self();
   else
+#ifdef STACK_GROWS_UP
+    return (pthread_descr)((unsigned long)sp &~ (STACK_SIZE-1));
+#else
     return (pthread_descr)(((unsigned long)sp | (STACK_SIZE-1))+1) - 1;
 #endif
+#endif /* ! THREAD_SELF */
 }
 
 /* If MEMORY_BARRIER isn't defined in pt-machine.h, assume the architecture
Index: linuxthreads/manager.c
===================================================================
RCS file: /home/cvs/parisc/glibc/linuxthreads/manager.c,v
retrieving revision 1.4
diff -u -p -r1.4 manager.c
--- manager.c	2001/01/16 05:22:35	1.4
+++ manager.c	2001/03/23 06:56:09
@@ -400,7 +400,40 @@ static int pthread_allocate_stack(const 
 
       guardaddr = new_thread_bottom + stacksize/2;
       /* We leave the guard area in the middle unmapped.	*/
-#else  /* !NEED_SEPARATE_REGISTER_STACK */
+#elif defined (STACK_GROWS_UP)
+
+      /* The thread description goes at the bottom of this area, and
+       * the stack  starts directly above it.  We neglect to map the last
+       * page of data in order to prevent a rogue thread overwriting
+       * another's stack data.
+       */
+      if (attr != NULL)
+	{
+	  guardsize = page_roundup (attr->__guardsize, granularity);
+	  stacksize = STACK_SIZE - guardsize;
+	  stacksize = MIN (stacksize,
+			   page_roundup (attr->__stacksize, granularity));
+	}
+      else
+	{
+	  guardsize = granularity;
+	  stacksize = STACK_SIZE - granularity;
+	}
+
+      new_thread = (pthread_descr)((unsigned long)default_new_thread &~ (STACK_SIZE - 1));
+      map_addr = mmap(new_thread, stacksize + guardsize,
+		      PROT_READ | PROT_WRITE | PROT_EXEC,
+		      MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+      if (map_addr == MAP_FAILED)
+	{
+	  return -1;
+	}
+
+      new_thread_bottom = map_addr + sizeof(*new_thread);
+      guardaddr = map_addr + stacksize;
+      guardsize = granularity;
+
+#else  /* !NEED_SEPARATE_REGISTER_STACK && !STACK_GROWS_UP */
 # if FLOATING_STACKS
       if (attr != NULL)
 	{
@@ -463,8 +496,8 @@ static int pthread_allocate_stack(const 
       guardaddr = map_addr;
       if (guardsize > 0)
 	mprotect (guardaddr, guardsize, PROT_NONE);
-# endif
-#endif /* !NEED_SEPARATE_REGISTER_STACK */
+# endif /* !FLOATING_STACKS */
+#endif /* !NEED_SEPARATE_REGISTER_STACK && !STACK_GROWS_UP */
     }
   *out_new_thread = new_thread;
   *out_new_thread_bottom = new_thread_bottom;
@@ -594,6 +627,10 @@ static int pthread_handle_create(pthread
 			 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 			 __pthread_sig_cancel, new_thread);
 	  sigprocmask(SIG_SETMASK, &manager_mask, NULL);
+#elif defined(STACK_GROWS_UP)
+	  pid = __clone(pthread_start_thread_event, (void **) new_thread_bottom,
+			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
+			__pthread_sig_cancel, new_thread);
 #else
 	  pid = __clone(pthread_start_thread_event, (void **) new_thread,
 			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
@@ -632,6 +669,10 @@ static int pthread_handle_create(pthread
 		     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 		     __pthread_sig_cancel, new_thread);
       sigprocmask(SIG_SETMASK, &manager_mask, NULL);
+#elif defined(STACK_GROWS_UP)
+      pid = __clone(pthread_start_thread, (void **) new_thread_bottom,
+		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
+		    __pthread_sig_cancel, new_thread);
 #else
       pid = __clone(pthread_start_thread, (void **) new_thread,
 		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
@@ -648,6 +689,9 @@ static int pthread_handle_create(pthread
 			    - new_thread_bottom);
 	munmap((caddr_t)new_thread_bottom,
 	       2 * stacksize + new_thread->p_guardsize);
+#elif defined(STACK_GROWS_UP)
+	size_t stacksize = guardaddr - (char *)(new_thread);
+	munmap(new_thread, stacksize);
 #else
 	size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
 	munmap(new_thread_bottom - guardsize, guardsize + stacksize);
@@ -710,9 +754,13 @@ static void pthread_free(pthread_descr t
   /* If initial thread, nothing to free */
   if (!th->p_userstack)
     {
-      size_t guardsize = th->p_guardsize;
       /* Free the stack and thread descriptor area */
       char *guardaddr = th->p_guardaddr;
+      size_t guardsize = th->p_guardsize;
+#ifdef STACK_GROWS_UP
+      size_t stacksize = guardaddr - (char *)th;
+      guardaddr = (char *)th;
+#else
       /* Guardaddr is always set, even if guardsize is 0.  This allows
 	 us to compute everything else.  */
       size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
@@ -720,6 +768,7 @@ static void pthread_free(pthread_descr t
       /* Take account of the register stack, which is below guardaddr.  */
       guardaddr -= stacksize;
       stacksize *= 2;
+#endif
 #endif
       /* Unmap the stack.  */
       munmap(guardaddr, stacksize + guardsize);
Index: linuxthreads/pthread.c
===================================================================
RCS file: /home/cvs/parisc/glibc/linuxthreads/pthread.c,v
retrieving revision 1.5
diff -u -p -r1.5 pthread.c
--- pthread.c	2001/01/16 05:22:35	1.5
+++ pthread.c	2001/03/23 06:56:09
@@ -402,11 +402,17 @@ static void pthread_initialize(void)
   /* Test if compare-and-swap is available */
   __pthread_has_cas = compare_and_swap_is_available();
 #endif
+#ifdef STACK_GROWS_UP
+  /* The initial thread already has all the stack it needs */
+  __pthread_initial_thread_bos = (char *)
+    ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
+#else
   /* For the initial stack, reserve at least STACK_SIZE bytes of stack
      below the current stack address, and align that on a
      STACK_SIZE boundary. */
   __pthread_initial_thread_bos =
     (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
+#endif
   /* Update the descriptor for the initial thread. */
   __pthread_initial_thread.p_pid = __getpid();
   /* Likewise for the resolver state _res.  */
@@ -528,6 +534,11 @@ int __pthread_initialize_manager(void)
 			 THREAD_MANAGER_STACK_SIZE,
 			 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
 			 (void *)(long)manager_pipe[0]);
+#elif defined(STACK_GROWS_UP)
+	  pid = __clone(__pthread_manager_event,
+			(void **) __pthread_manager_thread_bos,
+			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
+			(void *)(long)manager_pipe[0]);
 #else
 	  pid = __clone(__pthread_manager_event,
 			(void **) __pthread_manager_thread_tos,
@@ -564,6 +575,10 @@ int __pthread_initialize_manager(void)
 		     THREAD_MANAGER_STACK_SIZE,
 		     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
 		     (void *)(long)manager_pipe[0]);
+#elif defined(STACK_GROWS_UP)
+      pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
+		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
+		    (void *)(long)manager_pipe[0]);
 #else
       pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
 		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
Index: linuxthreads/sysdeps/hppa/pspinlock.c
===================================================================
RCS file: /home/cvs/parisc/glibc/linuxthreads/sysdeps/hppa/pspinlock.c,v
retrieving revision 1.2
diff -u -p -r1.2 pspinlock.c
--- pspinlock.c	2001/01/16 05:22:39	1.2
+++ pspinlock.c	2001/03/23 06:56:09
@@ -21,18 +21,20 @@
 #include <pthread.h>
 #include "internals.h"
 
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.  */
+#define __ldcw(a) ({ \
+	unsigned __ret; \
+	__asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
+	__ret; \
+})
+
 int
 __pthread_spin_lock (pthread_spinlock_t *lock)
 {
-  unsigned int val;
-
-  do
-    asm volatile ("ldcw %1,%0"
-		  : "=r" (val), "=m" (*lock)
-		  : "m" (*lock));
-  while (!val);
+	while (__ldcw (*lock) == 0)
+		while (*lock == 0) ;
 
-  return 0;
+	return 0;
 }
 weak_alias (__pthread_spin_lock, pthread_spin_lock)
 
@@ -42,9 +44,7 @@ __pthread_spin_trylock (pthread_spinlock
 {
   unsigned int val;
 
-  asm volatile ("ldcw %1,%0"
-		: "=r" (val), "=m" (*lock)
-		: "m" (*lock));
+  val = __ldcw(*lock);
 
   return val ? 0 : EBUSY;
 }
Index: linuxthreads/sysdeps/hppa/pt-machine.h
===================================================================
RCS file: /home/cvs/parisc/glibc/linuxthreads/sysdeps/hppa/pt-machine.h,v
retrieving revision 1.6
diff -u -p -r1.6 pt-machine.h
--- pt-machine.h	2001/01/29 14:39:08	1.6
+++ pt-machine.h	2001/03/23 06:56:09
@@ -31,13 +31,12 @@
 #define CURRENT_STACK_FRAME  stack_pointer
 register char * stack_pointer __asm__ ("%r30");
 
+#define STACK_GROWS_UP
 
 /* The hppa only has one atomic read and modify memory operation,
    load and clear, so hppa spinlocks must use zero to signify that
    someone is holding the lock.  */
 
-#define xstr(s) str(s)
-#define str(s) #s
 /* Spinlock implementation; required.  */
 PT_EI long int
 testandset (_lt_spinlock_t *spinlock)
@@ -51,5 +50,3 @@ testandset (_lt_spinlock_t *spinlock)
 
   return ret == 0;
 }
-#undef str
-#undef xstr