Diffstat (limited to 'packages/glibc/glibc-2.3.2/50_glibc232-hppa-full-nptl-2003-10-22.patch')
-rw-r--r--  packages/glibc/glibc-2.3.2/50_glibc232-hppa-full-nptl-2003-10-22.patch  2665
1 file changed, 2665 insertions, 0 deletions
diff --git a/packages/glibc/glibc-2.3.2/50_glibc232-hppa-full-nptl-2003-10-22.patch b/packages/glibc/glibc-2.3.2/50_glibc232-hppa-full-nptl-2003-10-22.patch
index e69de29bb2..7c3175f681 100644
--- a/packages/glibc/glibc-2.3.2/50_glibc232-hppa-full-nptl-2003-10-22.patch
+++ b/packages/glibc/glibc-2.3.2/50_glibc232-hppa-full-nptl-2003-10-22.patch
@@ -0,0 +1,2665 @@
+--- glibc-2.3.2-orig-debian/elf/dynamic-link.h 2003-10-22 01:06:09.000000000 -0400
++++ glibc-2.3.2/elf/dynamic-link.h 2003-10-22 01:11:53.000000000 -0400
+@@ -39,12 +39,21 @@
+ elf_machine_rela (struct link_map *map, const ElfW(Rela) *reloc,
+ const ElfW(Sym) *sym, const struct r_found_version *version,
+ void *const reloc_addr);
++# if ELF_MACHINE_REL_RELATIVE_NEEDSLINKMAP
++auto void __attribute__((always_inline))
++elf_machine_rel_relative (struct link_map *map, ElfW(Addr) l_addr, const ElfW(Rel) *reloc,
++ void *const reloc_addr);
++auto void __attribute__((always_inline))
++elf_machine_rela_relative (struct link_map *map, ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
++ void *const reloc_addr);
++# else
+ auto void __attribute__((always_inline))
+ elf_machine_rel_relative (ElfW(Addr) l_addr, const ElfW(Rel) *reloc,
+ void *const reloc_addr);
+ auto void __attribute__((always_inline))
+ elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
+ void *const reloc_addr);
++# endif
+ # if ELF_MACHINE_NO_RELA || defined ELF_MACHINE_PLT_REL
+ auto void __attribute__((always_inline))
+ elf_machine_lazy_rel (struct link_map *map,
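
The hunk above adds a variant of the relative-relocation helpers that takes a
struct link_map * when a port defines ELF_MACHINE_REL_RELATIVE_NEEDSLINKMAP;
hppa does so in dl-machine.h further down, because its "relative" relocations
can involve function descriptors that need the map. As a purely hypothetical
sketch (not a hunk from this patch), a relocation loop would then have to
dispatch like this:

    #if ELF_MACHINE_REL_RELATIVE_NEEDSLINKMAP
      elf_machine_rela_relative (map, map->l_addr, reloc, reloc_addr);
    #else
      elf_machine_rela_relative (map->l_addr, reloc, reloc_addr);
    #endif
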
+--- glibc-2.3.2-orig-debian/linuxthreads/attr.c 2003-10-22 01:06:10.000000000 -0400
++++ glibc-2.3.2/linuxthreads/attr.c 2003-10-22 01:07:38.000000000 -0400
+@@ -450,12 +450,19 @@
+ {
+ /* Found the entry. Now we have the info we need. */
+ attr->__stacksize = rl.rlim_cur;
++#ifdef _STACK_GROWS_DOWN
+ attr->__stackaddr = (void *) to;
+-
++#else
++ attr->__stackaddr = (void *) from;
++#endif
+ /* The limit might be too high. This is a bogus
+ situation but try to avoid making it worse. */
+ if ((size_t) attr->__stacksize > (size_t) attr->__stackaddr)
++#ifdef _STACK_GROWS_DOWN
+ attr->__stacksize = (size_t) attr->__stackaddr;
++#else
++ attr->__stacksize = (size_t)(to - from);
++#endif
+
+ /* We succeed and no need to look further. */
+ ret = 0;
+--- glibc-2.3.2-orig-debian/linuxthreads/descr.h 2003-10-22 01:06:10.000000000 -0400
++++ glibc-2.3.2/linuxthreads/descr.h 2003-10-22 01:07:38.000000000 -0400
+@@ -71,7 +71,7 @@
+ /* Atomic counter made possible by compare_and_swap */
+ struct pthread_atomic {
+ long p_count;
+- int p_spinlock;
++ __atomic_lock_t p_spinlock;
+ };
+
+
+--- glibc-2.3.2-orig-debian/linuxthreads/manager.c 2003-10-22 01:06:10.000000000 -0400
++++ glibc-2.3.2/linuxthreads/manager.c 2003-10-22 01:07:38.000000000 -0400
+@@ -70,8 +70,13 @@
+ #else
+ static inline pthread_descr thread_segment(int seg)
+ {
++# ifdef _STACK_GROWS_UP
++ return (pthread_descr)(THREAD_STACK_START_ADDRESS + (seg - 1) * STACK_SIZE)
++ + 1;
++# else
+ return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
+ - 1;
++# endif
+ }
+ #endif
+
+--- glibc-2.3.2-orig-debian/linuxthreads/oldsemaphore.c 2003-10-22 01:03:57.000000000 -0400
++++ glibc-2.3.2/linuxthreads/oldsemaphore.c 2003-10-22 01:07:38.000000000 -0400
+@@ -31,7 +31,7 @@
+
+ typedef struct {
+ long int sem_status;
+- int sem_spinlock;
++ __atomic_lock_t sem_spinlock;
+ } old_sem_t;
+
+ extern int __old_sem_init (old_sem_t *__sem, int __pshared, unsigned int __value);
+--- glibc-2.3.2-orig-debian/linuxthreads/pt-machine.c 2003-10-22 01:03:57.000000000 -0400
++++ glibc-2.3.2/linuxthreads/pt-machine.c 2003-10-22 01:07:38.000000000 -0400
+@@ -19,7 +19,9 @@
+
+ #define PT_EI
+
+-extern long int testandset (int *spinlock);
++#include <pthread.h>
++
++extern long int testandset (__atomic_lock_t *spinlock);
+ extern int __compare_and_swap (long int *p, long int oldval, long int newval);
+
+ #include <pt-machine.h>
+--- glibc-2.3.2-orig-debian/linuxthreads/pthread.c 2003-10-22 01:06:16.000000000 -0400
++++ glibc-2.3.2/linuxthreads/pthread.c 2003-10-22 01:07:38.000000000 -0400
+@@ -300,9 +300,9 @@
+ pthread_descr self;
+
+ /* First of all init __pthread_handles[0] and [1] if needed. */
+-# if __LT_SPINLOCK_INIT != 0
+- __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
+- __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
++# ifdef __LT_INITIALIZER_NOT_ZERO
++ __pthread_handles[0].h_lock = __LOCK_ALT_INITIALIZER;
++ __pthread_handles[1].h_lock = __LOCK_ALT_INITIALIZER;
+ # endif
+ # ifndef SHARED
+ /* Unlike in the dynamically linked case the dynamic linker has not
+@@ -370,7 +370,7 @@
+ # endif
+ /* self->p_start_args need not be initialized, it's all zero. */
+ self->p_userstack = 1;
+-# if __LT_SPINLOCK_INIT != 0
++# ifdef __LT_INITIALIZER_NOT_ZERO
+ self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
+ # endif
+ self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF;
+@@ -384,9 +384,9 @@
+ #else /* USE_TLS */
+
+ /* First of all init __pthread_handles[0] and [1]. */
+-# if __LT_SPINLOCK_INIT != 0
+- __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
+- __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
++# ifdef __LT_INITIALIZER_NOT_ZERO
++ __pthread_handles[0].h_lock = __LOCK_ALT_INITIALIZER;
++ __pthread_handles[1].h_lock = __LOCK_ALT_INITIALIZER;
+ # endif
+ __pthread_handles[0].h_descr = &__pthread_initial_thread;
+ __pthread_handles[1].h_descr = &__pthread_manager_thread;
+@@ -893,7 +893,11 @@
+ /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
+ the manager threads handled specially in thread_self(), so start at 2 */
+ h = __pthread_handles + 2;
++# ifdef _STACK_GROWS_UP
++ while (! (sp >= (char *) h->h_descr && sp < h->h_descr->p_guardaddr)) h++;
++# else
+ while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
++# endif
+ return h->h_descr;
+ }
+
+@@ -908,11 +912,22 @@
+ return manager_thread;
+ h = __pthread_handles + 2;
+ # ifdef USE_TLS
++# ifdef _STACK_GROWS_UP
++ while (h->h_descr == NULL
++ || ! (sp >= h->h_descr->p_stackaddr &&
++ sp < h->h_descr->p_guardaddr))
++# else
+ while (h->h_descr == NULL
+- || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
++ || ! (sp <= h->h_descr->p_stackaddr &&
++ sp >= h->h_bottom))
++# endif
+ h++;
+ # else
++# ifdef _STACK_GROWS_UP
++ while (! (sp >= (char *) h->h_descr && sp < h->h_descr->p_guardaddr))
++# else
+ while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
++# endif
+ h++;
+ # endif
+ return h->h_descr;
+--- glibc-2.3.2-orig-debian/linuxthreads/spinlock.c 2003-10-22 01:03:57.000000000 -0400
++++ glibc-2.3.2/linuxthreads/spinlock.c 2003-10-22 01:07:38.000000000 -0400
+@@ -24,9 +24,9 @@
+ #include "spinlock.h"
+ #include "restart.h"
+
+-static void __pthread_acquire(int * spinlock);
++static void __pthread_acquire(__atomic_lock_t * spinlock);
+
+-static inline void __pthread_release(int * spinlock)
++static inline void __pthread_release(__atomic_lock_t * spinlock)
+ {
+ WRITE_MEMORY_BARRIER();
+ *spinlock = __LT_SPINLOCK_INIT;
+@@ -269,11 +269,11 @@
+ struct wait_node {
+ struct wait_node *next; /* Next node in null terminated linked list */
+ pthread_descr thr; /* The thread waiting with this node */
+- int abandoned; /* Atomic flag */
++ __atomic_lock_t abandoned; /* Atomic flag */
+ };
+
+ static long wait_node_free_list;
+-static int wait_node_free_list_spinlock;
++__pthread_lock_define_initialized(static, wait_node_free_list_spinlock);
+
+ /* Allocate a new node from the head of the free list using an atomic
+ operation, or else using malloc if that list is empty. A fundamental
+@@ -376,7 +376,7 @@
+ if (self == NULL)
+ self = thread_self();
+
+- wait_node.abandoned = 0;
++ wait_node.abandoned = __LT_SPINLOCK_INIT;
+ wait_node.next = (struct wait_node *) lock->__status;
+ wait_node.thr = self;
+ lock->__status = (long) &wait_node;
+@@ -402,7 +402,7 @@
+ wait_node.thr = self;
+ newstatus = (long) &wait_node;
+ }
+- wait_node.abandoned = 0;
++ wait_node.abandoned = __LT_SPINLOCK_INIT;
+ wait_node.next = (struct wait_node *) oldstatus;
+ /* Make sure the store in wait_node.next completes before performing
+ the compare-and-swap */
+@@ -451,7 +451,7 @@
+ if (self == NULL)
+ self = thread_self();
+
+- p_wait_node->abandoned = 0;
++ p_wait_node->abandoned = __LT_SPINLOCK_INIT;
+ p_wait_node->next = (struct wait_node *) lock->__status;
+ p_wait_node->thr = self;
+ lock->__status = (long) p_wait_node;
+@@ -474,7 +474,7 @@
+ p_wait_node->thr = self;
+ newstatus = (long) p_wait_node;
+ }
+- p_wait_node->abandoned = 0;
++ p_wait_node->abandoned = __LT_SPINLOCK_INIT;
+ p_wait_node->next = (struct wait_node *) oldstatus;
+ /* Make sure the store in wait_node.next completes before performing
+ the compare-and-swap */
+@@ -574,7 +574,7 @@
+ while (p_node != (struct wait_node *) 1) {
+ int prio;
+
+- if (p_node->abandoned) {
++ if (lock_held(&p_node->abandoned)) {
+ /* Remove abandoned node. */
+ #if defined TEST_FOR_COMPARE_AND_SWAP
+ if (!__pthread_has_cas)
+@@ -662,7 +662,7 @@
+ #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+
+ int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
+- int * spinlock)
++ __atomic_lock_t * spinlock)
+ {
+ int res;
+
+@@ -699,7 +699,7 @@
+ - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
+ sched_yield(), then sleeping again if needed. */
+
+-static void __pthread_acquire(int * spinlock)
++static void __pthread_acquire(__atomic_lock_t * spinlock)
+ {
+ int cnt = 0;
+ struct timespec tm;
+--- glibc-2.3.2-orig-debian/linuxthreads/spinlock.h 2003-10-22 01:06:10.000000000 -0400
++++ glibc-2.3.2/linuxthreads/spinlock.h 2003-10-22 01:07:38.000000000 -0400
+@@ -33,14 +33,28 @@
+ #endif
+ #endif
+
++/* Define lock_held for all arches that don't need a modified copy. */
++#ifndef __LT_INITIALIZER_NOT_ZERO
++# define lock_held(p) *(p)
++#endif
++
++/* Initializers for possibly complex structures */
++#ifdef __LT_INITIALIZER_NOT_ZERO
++# define __pthread_lock_define_initialized(CLASS,NAME) \
++ CLASS __atomic_lock_t NAME = __LT_SPINLOCK_ALT_INIT
++#else
++# define __pthread_lock_define_initialized(CLASS,NAME) \
++ CLASS __atomic_lock_t NAME
++#endif
++
+ #if defined(TEST_FOR_COMPARE_AND_SWAP)
+
+ extern int __pthread_has_cas;
+ extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
+- int * spinlock);
++ __atomic_lock_t * spinlock);
+
+ static inline int compare_and_swap(long * ptr, long oldval, long newval,
+- int * spinlock)
++ __atomic_lock_t * spinlock)
+ {
+ if (__builtin_expect (__pthread_has_cas, 1))
+ return __compare_and_swap(ptr, oldval, newval);
+@@ -58,7 +72,7 @@
+
+ static inline int
+ compare_and_swap_with_release_semantics (long * ptr, long oldval,
+- long newval, int * spinlock)
++ long newval, __atomic_lock_t * spinlock)
+ {
+ return __compare_and_swap_with_release_semantics (ptr, oldval,
+ newval);
+@@ -67,7 +81,7 @@
+ #endif
+
+ static inline int compare_and_swap(long * ptr, long oldval, long newval,
+- int * spinlock)
++ __atomic_lock_t * spinlock)
+ {
+ return __compare_and_swap(ptr, oldval, newval);
+ }
+@@ -75,10 +89,10 @@
+ #else
+
+ extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
+- int * spinlock);
++ __atomic_lock_t * spinlock);
+
+ static inline int compare_and_swap(long * ptr, long oldval, long newval,
+- int * spinlock)
++ __atomic_lock_t * spinlock)
+ {
+ return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
+ }
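
For reference, a sketch of what the new __pthread_lock_define_initialized
macro expands to, assuming the hppa definitions added to bits/initspin.h
later in this patch (illustrative only):

    /* hppa: __LT_INITIALIZER_NOT_ZERO is defined, so the lock must be
       spelled out with the nonzero "free" pattern: */
    static __atomic_lock_t wait_node_free_list_spinlock = { { 1, 1, 1, 1 } };

    /* other ports: a zero-initialized lock is already in the free state: */
    static __atomic_lock_t wait_node_free_list_spinlock;
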
+--- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/hppa/pspinlock.c 2003-10-22 01:03:57.000000000 -0400
++++ glibc-2.3.2/linuxthreads/sysdeps/hppa/pspinlock.c 2003-10-22 01:07:38.000000000 -0400
+@@ -24,13 +24,10 @@
+ int
+ __pthread_spin_lock (pthread_spinlock_t *lock)
+ {
+- unsigned int val;
++ volatile unsigned int *addr = __ldcw_align (lock);
+
+- do
+- asm volatile ("ldcw %1,%0"
+- : "=r" (val), "=m" (*lock)
+- : "m" (*lock));
+- while (!val);
++ while (__ldcw (addr) == 0)
++ while (*addr == 0) ;
+
+ return 0;
+ }
+@@ -40,13 +37,9 @@
+ int
+ __pthread_spin_trylock (pthread_spinlock_t *lock)
+ {
+- unsigned int val;
++ volatile unsigned int *a = __ldcw_align (lock);
+
+- asm volatile ("ldcw %1,%0"
+- : "=r" (val), "=m" (*lock)
+- : "m" (*lock));
+-
+- return val ? 0 : EBUSY;
++ return __ldcw (a) ? 0 : EBUSY;
+ }
+ weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+@@ -54,7 +47,9 @@
+ int
+ __pthread_spin_unlock (pthread_spinlock_t *lock)
+ {
+- *lock = 1;
++ volatile unsigned int *a = __ldcw_align (lock);
++
++ *a = 1;
+ return 0;
+ }
+ weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+@@ -66,7 +61,9 @@
+ /* We can ignore the `pshared' parameter. Since we are busy-waiting
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+- *lock = 1;
++ volatile unsigned int *a = __ldcw_align (lock);
++
++ *a = 1;
+ return 0;
+ }
+ weak_alias (__pthread_spin_init, pthread_spin_init)
+--- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/hppa/pt-machine.h 2003-10-22 01:06:10.000000000 -0400
++++ glibc-2.3.2/linuxthreads/sysdeps/hppa/pt-machine.h 2003-10-22 01:12:51.000000000 -0400
+@@ -22,41 +22,97 @@
+ #ifndef _PT_MACHINE_H
+ #define _PT_MACHINE_H 1
+
++#include <sys/types.h>
+ #include <bits/initspin.h>
+
+ #ifndef PT_EI
+ # define PT_EI extern inline __attribute__ ((always_inline))
+ #endif
+
+-extern long int testandset (int *spinlock);
+-extern int __compare_and_swap (long int *p, long int oldval, long int newval);
++extern inline long int testandset (__atomic_lock_t *spinlock);
++extern inline int __compare_and_swap (long int *p, long int oldval, long int newval);
++extern inline int lock_held (__atomic_lock_t *spinlock);
++extern inline int __load_and_clear (__atomic_lock_t *spinlock);
+
+ /* Get some notion of the current stack. Need not be exactly the top
+ of the stack, just something somewhere in the current frame. */
+ #define CURRENT_STACK_FRAME stack_pointer
+ register char * stack_pointer __asm__ ("%r30");
+
++/* Get/Set thread-specific pointer. We have to call into the kernel to
++ * modify it, but we can read it in user mode. */
++
++#define THREAD_SELF __get_cr27()
++
++static inline struct _pthread_descr_struct * __get_cr27(void)
++{
++ long cr27;
++ asm("mfctl %%cr27, %0" : "=r" (cr27) : );
++ return (struct _pthread_descr_struct *) cr27;
++}
++
++#define INIT_THREAD_SELF(descr, nr) __set_cr27(descr)
++
++static inline void __set_cr27(struct _pthread_descr_struct * cr27)
++{
++ asm(
++ "ble 0xe0(%%sr2, %%r0)\n\t"
++ "copy %0, %%r26"
++ : : "r" (cr27) : "r26" );
++}
++
++/* We want the OS to assign stack addresses. */
++#define FLOATING_STACKS 1
++#define ARCH_STACK_MAX_SIZE 8*1024*1024
+
+ /* The hppa only has one atomic read and modify memory operation,
+ load and clear, so hppa spinlocks must use zero to signify that
+- someone is holding the lock. */
++ someone is holding the lock. The address used for the ldcw
++ semaphore must be 16-byte aligned. */
++#define __ldcw(a) ({ \
++ unsigned int __ret; \
++ __asm__ __volatile__("ldcw 0(%2),%0" \
++ : "=r" (__ret), "=m" (*(a)) : "r" (a)); \
++ __ret; \
++})
++
++/* Because malloc only guarantees 8-byte alignment for malloc'd data,
++ and GCC only guarantees 8-byte alignment for stack locals, we can't
++ be assured of 16-byte alignment for atomic lock data even if we
++ specify "__attribute ((aligned(16)))" in the type declaration. So,
++ we use a struct containing an array of four ints for the atomic lock
++ type and dynamically select the 16-byte aligned int from the array
++ for the semaphore. */
++#define __PA_LDCW_ALIGNMENT 16
++#define __ldcw_align(a) ({ \
++ volatile unsigned int __ret = (unsigned int) a; \
++ if ((__ret & ~(__PA_LDCW_ALIGNMENT - 1)) < (unsigned int) a) \
++ __ret = (__ret & ~(__PA_LDCW_ALIGNMENT - 1)) + __PA_LDCW_ALIGNMENT; \
++ (unsigned int *) __ret; \
++})
+
+-#define xstr(s) str(s)
+-#define str(s) #s
+ /* Spinlock implementation; required. */
+-PT_EI long int
+-testandset (int *spinlock)
++PT_EI int
++__load_and_clear (__atomic_lock_t *spinlock)
+ {
+- int ret;
++ volatile unsigned int *a = __ldcw_align (spinlock);
+
+- __asm__ __volatile__(
+- "ldcw 0(%2),%0"
+- : "=r"(ret), "=m"(*spinlock)
+- : "r"(spinlock));
++ return __ldcw (a);
++}
+
+- return ret == 0;
++/* Emulate testandset */
++PT_EI long int
++testandset (__atomic_lock_t *spinlock)
++{
++ return (__load_and_clear(spinlock) == 0);
+ }
+-#undef str
+-#undef xstr
+
++PT_EI int
++lock_held (__atomic_lock_t *spinlock)
++{
++ volatile unsigned int *a = __ldcw_align (spinlock);
++
++ return *a == 0;
++}
++
+ #endif /* pt-machine.h */
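
The __ldcw_align macro above rounds an address up to the next 16-byte
boundary; since the lock structure is 16 bytes long and is always at least
8-byte aligned, the rounded address is guaranteed to stay inside the
four-word array. A standalone sketch of that invariant, using the usual
add-and-mask form of the same rounding (plain C, not part of the patch):

    #include <assert.h>

    #define ALIGN 16UL

    int main (void)
    {
      unsigned long base;
      /* Any 8-byte-aligned, 16-byte object contains exactly one 16-byte
         aligned word - the word ldcw will operate on. */
      for (base = 0; base < 64; base += 8)
        {
          unsigned long w = (base + ALIGN - 1) & ~(ALIGN - 1);
          assert (w >= base && w + 4 <= base + 16);
        }
      return 0;
    }
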
+--- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/hppa/pt-machine.h.rej 1969-12-31 19:00:00.000000000 -0500
++++ glibc-2.3.2/linuxthreads/sysdeps/hppa/pt-machine.h.rej 2003-10-22 01:07:38.000000000 -0400
+@@ -0,0 +1,153 @@
++***************
++*** 1,6 ****
++ /* Machine-dependent pthreads configuration and inline functions.
++ hppa version.
++- Copyright (C) 2000, 2002 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++ Contributed by Richard Henderson <rth@tamu.edu>.
++
++--- 1,6 ----
++ /* Machine-dependent pthreads configuration and inline functions.
++ hppa version.
+++ Copyright (C) 2000, 2002, 2003 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++ Contributed by Richard Henderson <rth@tamu.edu>.
++
++***************
++*** 22,62 ****
++ #ifndef _PT_MACHINE_H
++ #define _PT_MACHINE_H 1
++
++ #include <bits/initspin.h>
++
++ #ifndef PT_EI
++ # define PT_EI extern inline
++ #endif
++
++- extern long int testandset (int *spinlock);
++- extern int __compare_and_swap (long int *p, long int oldval, long int newval);
++
++ /* Get some notion of the current stack. Need not be exactly the top
++ of the stack, just something somewhere in the current frame. */
++ #define CURRENT_STACK_FRAME stack_pointer
++ register char * stack_pointer __asm__ ("%r30");
++
++-
++ /* The hppa only has one atomic read and modify memory operation,
++ load and clear, so hppa spinlocks must use zero to signify that
++- someone is holding the lock. */
++-
++- #define xstr(s) str(s)
++- #define str(s) #s
++ /* Spinlock implementation; required. */
++ PT_EI long int
++- testandset (int *spinlock)
++ {
++- int ret;
++-
++- __asm__ __volatile__(
++- "ldcw 0(%2),%0"
++- : "=r"(ret), "=m"(*spinlock)
++- : "r"(spinlock));
++-
++- return ret == 0;
++ }
++- #undef str
++- #undef xstr
++-
++ #endif /* pt-machine.h */
++--- 22,115 ----
++ #ifndef _PT_MACHINE_H
++ #define _PT_MACHINE_H 1
++
+++ #include <sys/types.h>
++ #include <bits/initspin.h>
++
++ #ifndef PT_EI
++ # define PT_EI extern inline
++ #endif
++
+++ extern inline long int testandset (__atomic_lock_t *spinlock);
+++ extern inline int __compare_and_swap (long int *p, long int oldval, long int newval);
+++ extern inline int lock_held (__atomic_lock_t *spinlock);
+++ extern inline int __load_and_clear (__atomic_lock_t *spinlock);
++
++ /* Get some notion of the current stack. Need not be exactly the top
++ of the stack, just something somewhere in the current frame. */
++ #define CURRENT_STACK_FRAME stack_pointer
++ register char * stack_pointer __asm__ ("%r30");
++
+++ /* Get/Set thread-specific pointer. We have to call into the kernel to
+++ modify it, but we can read it in user mode. */
+++
+++ #define THREAD_SELF __get_cr27()
+++
+++ static inline struct _pthread_descr_struct * __get_cr27(void)
+++ {
+++ long cr27;
+++ asm("mfctl %%cr27, %0" : "=r" (cr27) : );
+++ return (struct _pthread_descr_struct *) cr27;
+++ }
+++
+++ #define INIT_THREAD_SELF(descr, nr) __set_cr27(descr)
+++
+++ static inline void __set_cr27(struct _pthread_descr_struct * cr27)
+++ {
+++ asm( " ble 0xe0(%%sr2, %%r0)\n"
+++ " copy %0,%%r26"
+++ : : "r" (cr27) : "r26" );
+++ }
+++
+++ /* We want the OS to assign stack addresses. */
+++ #define FLOATING_STACKS 1
+++ #define ARCH_STACK_MAX_SIZE 8*1024*1024
+++
++ /* The hppa only has one atomic read and modify memory operation,
++ load and clear, so hppa spinlocks must use zero to signify that
+++ someone is holding the lock. The address used for the ldcw
+++ semaphore must be 16-byte aligned. */
+++ #define __ldcw(a) ({ \
+++ unsigned int __ret; \
+++ __asm__ __volatile__("ldcw 0(%2),%0" \
+++ : "=r" (__ret), "=m" (*(a)) : "r" (a)); \
+++ __ret; \
+++ })
+++
+++ /* Because malloc only guarantees 8-byte alignment for malloc'd data,
+++ and GCC only guarantees 8-byte alignment for stack locals, we can't
+++ be assured of 16-byte alignment for atomic lock data even if we
+++ specify "__attribute ((aligned(16)))" in the type declaration. So,
+++ we use a struct containing an array of four ints for the atomic lock
+++ type and dynamically select the 16-byte aligned int from the array
+++ for the semaphore. */
+++ #define __PA_LDCW_ALIGNMENT 16
+++ #define __ldcw_align(a) ({ \
+++ volatile unsigned int __ret = (unsigned int) a; \
+++ if ((__ret & ~(__PA_LDCW_ALIGNMENT - 1)) < (unsigned int) a) \
+++ __ret = (__ret & ~(__PA_LDCW_ALIGNMENT - 1)) + __PA_LDCW_ALIGNMENT; \
+++ (unsigned int *) __ret; \
+++ })
+++
++ /* Spinlock implementation; required. */
+++ PT_EI int
+++ __load_and_clear (__atomic_lock_t *spinlock)
+++ {
+++ volatile unsigned int *a = __ldcw_align (spinlock);
+++ return __ldcw (a);
+++ }
+++
+++ /* Emulate testandset */
++ PT_EI long int
+++ testandset (__atomic_lock_t *spinlock)
++ {
+++ return (__load_and_clear(spinlock) == 0);
++ }
+++
+++ PT_EI int
+++ lock_held (__atomic_lock_t *spinlock)
+++ {
+++ volatile unsigned int *a = __ldcw_align (spinlock);
+++ return *a == 0;
+++ }
+++
++ #endif /* pt-machine.h */
+--- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/pthread/bits/libc-lock.h 2003-10-22 01:06:16.000000000 -0400
++++ glibc-2.3.2/linuxthreads/sysdeps/pthread/bits/libc-lock.h 2003-10-22 01:07:38.000000000 -0400
+@@ -71,12 +71,12 @@
+ initialized locks must be set to one due to the lack of normal
+ atomic operations.) */
+
+-#if __LT_SPINLOCK_INIT == 0
++#ifdef __LT_INITIALIZER_NOT_ZERO
+ # define __libc_lock_define_initialized(CLASS,NAME) \
+- CLASS __libc_lock_t NAME;
++ CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
+ #else
+ # define __libc_lock_define_initialized(CLASS,NAME) \
+- CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
++ CLASS __libc_lock_t NAME;
+ #endif
+
+ #define __libc_rwlock_define_initialized(CLASS,NAME) \
+--- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h 2003-10-22 01:03:57.000000000 -0400
++++ glibc-2.3.2/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h 2003-10-22 01:07:38.000000000 -0400
+@@ -22,12 +22,14 @@
+ #define __need_schedparam
+ #include <bits/sched.h>
+
++typedef int __atomic_lock_t;
++
+ /* Fast locks (not abstract because mutexes and conditions aren't abstract). */
+ struct _pthread_fastlock
+ {
+- long int __status; /* "Free" or "taken" or head of waiting list */
+- int __spinlock; /* Used by compare_and_swap emulation. Also,
+- adaptive SMP lock stores spin count here. */
++ long int __status; /* "Free" or "taken" or head of waiting list */
++ __atomic_lock_t __spinlock; /* Used by compare_and_swap emulation. Also,
++ adaptive SMP lock stores spin count here. */
+ };
+
+ #ifndef _PTHREAD_DESCR_DEFINED
+--- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h 2003-10-22 01:03:57.000000000 -0400
++++ glibc-2.3.2/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h 2003-10-22 01:07:38.000000000 -0400
+@@ -19,9 +19,23 @@
+
+ /* Initial value of a spinlock. PA-RISC only implements atomic load
+ and clear so this must be non-zero. */
+-#define __LT_SPINLOCK_INIT 1
++#define __LT_SPINLOCK_INIT ((__atomic_lock_t) { { 1, 1, 1, 1 } })
++
++/* Initialize global spinlocks without the cast; generally macro wrapped */
++#define __LT_SPINLOCK_ALT_INIT { { 1, 1, 1, 1 } }
++
++/* Macros for lock initializers.  These use the brace-only form above,
++   since the compound-literal form cannot appear in a static
++   initializer. */
++#define __LOCK_INITIALIZER { __LT_SPINLOCK_ALT_INIT, 0 }
++
++/* Used to initialize _pthread_fastlock's in non-static case */
++#define __LOCK_ALT_INITIALIZER ((struct _pthread_fastlock){ __LT_SPINLOCK_INIT, 0 })
++
++/* Used in pthread_atomic initialization */
++#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_ALT_INIT }
++
++/* Tell the rest of the code that the initializer is non-zero without
++   explaining its internal structure */
++#define __LT_INITIALIZER_NOT_ZERO
+
+-/* Macros for lock initializers, using the above definition. */
+-#define __LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+-#define __ALT_LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+-#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_INIT }
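
Two spellings of the initial value are needed because the compound literal
((__atomic_lock_t) { { 1, 1, 1, 1 } }) is fine in an assignment but cannot
portably appear in a static initializer, where only the plain brace form is
accepted. A sketch of the intended use of each (illustrative, not from the
patch):

    /* Static definition: brace-only form. */
    static __atomic_lock_t lock = __LT_SPINLOCK_ALT_INIT;

    /* Run-time release, as in __pthread_release() above: compound literal. */
    lock = __LT_SPINLOCK_INIT;
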
+--- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h 1969-12-31 19:00:00.000000000 -0500
++++ glibc-2.3.2/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h 2003-10-22 01:07:38.000000000 -0400
+@@ -0,0 +1,160 @@
++/* Linuxthreads - a simple clone()-based implementation of Posix */
++/* threads for Linux. */
++/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
++/* */
++/* This program is free software; you can redistribute it and/or */
++/* modify it under the terms of the GNU Library General Public License */
++/* as published by the Free Software Foundation; either version 2 */
++/* of the License, or (at your option) any later version. */
++/* */
++/* This program is distributed in the hope that it will be useful, */
++/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
++/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
++/* GNU Library General Public License for more details. */
++
++#if !defined _BITS_TYPES_H && !defined _PTHREAD_H
++# error "Never include <bits/pthreadtypes.h> directly; use <sys/types.h> instead."
++#endif
++
++#ifndef _BITS_PTHREADTYPES_H
++#define _BITS_PTHREADTYPES_H 1
++
++#define __need_schedparam
++#include <bits/sched.h>
++
++/* We need 128-bit alignment for the ldcw semaphore. At most, we are
++ assured of 64-bit alignment for stack locals and malloc'd data. Thus,
++ we use a struct with four ints for the atomic lock type. The locking
++ code will figure out which of the four to use for the ldcw semaphore. */
++typedef volatile struct {
++ int lock[4];
++} __attribute__ ((aligned(16))) __atomic_lock_t;
++
++/* Fast locks (not abstract because mutexes and conditions aren't abstract). */
++struct _pthread_fastlock
++{
++ __atomic_lock_t __spinlock; /* Used by compare_and_swap emulation. Also,
++ adaptive SMP lock stores spin count here. */
++ long int __status; /* "Free" or "taken" or head of waiting list */
++};
++
++#ifndef _PTHREAD_DESCR_DEFINED
++/* Thread descriptors */
++typedef struct _pthread_descr_struct *_pthread_descr;
++# define _PTHREAD_DESCR_DEFINED
++#endif
++
++
++/* Attributes for threads. */
++typedef struct __pthread_attr_s
++{
++ int __detachstate;
++ int __schedpolicy;
++ struct __sched_param __schedparam;
++ int __inheritsched;
++ int __scope;
++ size_t __guardsize;
++ int __stackaddr_set;
++ void *__stackaddr;
++ size_t __stacksize;
++} pthread_attr_t;
++
++
++/* Conditions (not abstract because of PTHREAD_COND_INITIALIZER */
++
++#ifdef __GLIBC_HAVE_LONG_LONG
++__extension__ typedef long long __pthread_cond_align_t;
++#else
++typedef long __pthread_cond_align_t;
++#endif
++
++typedef struct
++{
++ struct _pthread_fastlock __c_lock; /* Protect against concurrent access */
++ _pthread_descr __c_waiting; /* Threads waiting on this condition */
++ char __padding[48 - sizeof (struct _pthread_fastlock)
++ - sizeof (_pthread_descr) - sizeof (__pthread_cond_align_t)];
++ __pthread_cond_align_t __align;
++} pthread_cond_t;
++
++
++/* Attribute for conditionally variables. */
++typedef struct
++{
++ int __dummy;
++} pthread_condattr_t;
++
++/* Keys for thread-specific data */
++typedef unsigned int pthread_key_t;
++
++
++/* Mutexes (not abstract because of PTHREAD_MUTEX_INITIALIZER). */
++/* (The layout is unnatural to maintain binary compatibility
++ with earlier releases of LinuxThreads.) */
++typedef struct
++{
++ int __m_reserved; /* Reserved for future use */
++ int __m_count; /* Depth of recursive locking */
++ _pthread_descr __m_owner; /* Owner thread (if recursive or errcheck) */
++ int __m_kind; /* Mutex kind: fast, recursive or errcheck */
++ struct _pthread_fastlock __m_lock; /* Underlying fast lock */
++} pthread_mutex_t;
++
++
++/* Attribute for mutex. */
++typedef struct
++{
++ int __mutexkind;
++} pthread_mutexattr_t;
++
++
++/* Once-only execution */
++typedef int pthread_once_t;
++
++
++#ifdef __USE_UNIX98
++/* Read-write locks. */
++typedef struct _pthread_rwlock_t
++{
++ struct _pthread_fastlock __rw_lock; /* Lock to guarantee mutual exclusion */
++ int __rw_readers; /* Number of readers */
++ _pthread_descr __rw_writer; /* Identity of writer, or NULL if none */
++ _pthread_descr __rw_read_waiting; /* Threads waiting for reading */
++ _pthread_descr __rw_write_waiting; /* Threads waiting for writing */
++ int __rw_kind; /* Reader/Writer preference selection */
++ int __rw_pshared; /* Shared between processes or not */
++} pthread_rwlock_t;
++
++
++/* Attribute for read-write locks. */
++typedef struct
++{
++ int __lockkind;
++ int __pshared;
++} pthread_rwlockattr_t;
++#endif
++
++#ifdef __USE_XOPEN2K
++/* POSIX spinlock data type. */
++typedef __atomic_lock_t pthread_spinlock_t;
++
++/* POSIX barrier. */
++typedef struct {
++ struct _pthread_fastlock __ba_lock; /* Lock to guarantee mutual exclusion */
++ int __ba_required; /* Threads needed for completion */
++ int __ba_present; /* Threads waiting */
++ _pthread_descr __ba_waiting; /* Queue of waiting threads */
++} pthread_barrier_t;
++
++/* barrier attribute */
++typedef struct {
++ int __pshared;
++} pthread_barrierattr_t;
++
++#endif
++
++
++/* Thread identifiers */
++typedef unsigned long int pthread_t;
++
++#endif /* bits/pthreadtypes.h */
+--- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/unix/sysv/linux/hppa/malloc-machine.h 1969-12-31 19:00:00.000000000 -0500
++++ glibc-2.3.2/linuxthreads/sysdeps/unix/sysv/linux/hppa/malloc-machine.h 2003-10-22 01:07:38.000000000 -0400
+@@ -0,0 +1,73 @@
++/* HP-PARISC macro definitions for mutexes, thread-specific data
++ and parameters for malloc.
++ Copyright (C) 2003 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++ Contributed by Carlos O'Donell <carlos@baldric.uwo.ca>, 2003.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, write to the Free
++ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307 USA. */
++
++#ifndef _MALLOC_MACHINE_H
++#define _MALLOC_MACHINE_H
++
++#undef thread_atfork_static
++
++#include <atomic.h>
++#include <bits/libc-lock.h>
++
++__libc_lock_define (typedef, mutex_t)
++
++/* Since our lock structure does not tolerate being initialized to zero, we must
++ modify the standard function calls made by malloc */
++# define mutex_init(m) \
++ __libc_maybe_call (__pthread_mutex_init, (m, NULL), \
++ (((m)->__m_lock.__spinlock = __LT_SPINLOCK_INIT),(*(int *)(m))) )
++# define mutex_lock(m) \
++ __libc_maybe_call (__pthread_mutex_lock, (m), \
++ (__load_and_clear(&((m)->__m_lock.__spinlock)), 0))
++# define mutex_trylock(m) \
++ __libc_maybe_call (__pthread_mutex_trylock, (m), \
++ (*(int *)(m) ? 1 : (__load_and_clear(&((m)->__m_lock.__spinlock)), 0)))
++# define mutex_unlock(m) \
++ __libc_maybe_call (__pthread_mutex_unlock, (m), \
++ (((m)->__m_lock.__spinlock = __LT_SPINLOCK_INIT), (*(int *)(m))) )
++
++/* This is defined by newer gcc versions, uniquely for each module. */
++extern void *__dso_handle __attribute__ ((__weak__));
++
++#include <fork.h>
++
++#ifdef SHARED
++# define thread_atfork(prepare, parent, child) \
++ __register_atfork (prepare, parent, child, __dso_handle)
++#else
++# define thread_atfork(prepare, parent, child) \
++ __register_atfork (prepare, parent, child, \
++ &__dso_handle == NULL ? NULL : __dso_handle)
++#endif
++
++/* thread specific data for glibc */
++
++#include <bits/libc-tsd.h>
++
++typedef int tsd_key_t[1]; /* no key data structure, libc magic does it */
++__libc_tsd_define (static, MALLOC) /* declaration/common definition */
++#define tsd_key_create(key, destr) ((void) (key))
++#define tsd_setspecific(key, data) __libc_tsd_set (MALLOC, (data))
++#define tsd_getspecific(key, vptr) ((vptr) = __libc_tsd_get (MALLOC))
++
++#include <sysdeps/generic/malloc-machine.h>
++
++#endif /* !defined(_MALLOC_MACHINE_H) */
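
When libpthread is not linked in, __libc_maybe_call falls through to its
last argument, so the macros above degrade to simple single-threaded
operations on the lock word. A rough sketch of that fallback behaviour
(comments describe the expansion; illustrative only):

    mutex_t m;
    mutex_init (&m);    /* __m_lock.__spinlock = { { 1, 1, 1, 1 } }: free  */
    mutex_lock (&m);    /* __load_and_clear zeroes the aligned word: taken */
    mutex_unlock (&m);  /* spinlock reset to the nonzero pattern: free     */
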
+--- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/unix/sysv/linux/hppa/sysdep-cancel.h 1969-12-31 19:00:00.000000000 -0500
++++ glibc-2.3.2/linuxthreads/sysdeps/unix/sysv/linux/hppa/sysdep-cancel.h 2003-10-22 01:07:38.000000000 -0400
+@@ -0,0 +1,190 @@
++/* Copyright (C) 2003 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++ Contributed by Carlos O'Donell <carlos@baldric.uwo.ca>, 2003.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, write to the Free
++ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307 USA. */
++
++#include <sysdep.h>
++#ifndef __ASSEMBLER__
++# include <linuxthreads/internals.h>
++#endif
++
++#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
++
++# ifndef NO_ERROR
++# define NO_ERROR -0x1000
++# endif
++
++# undef PSEUDO
++# define PSEUDO(name, syscall_name, args) \
++ ENTRY (name) \
++ SINGLE_THREAD_P ASM_LINE_SEP \
++ cmpib,<> 0,%ret0,Lpseudo_cancel ASM_LINE_SEP \
++ nop ASM_LINE_SEP \
++ DO_CALL(syscall_name, args) ASM_LINE_SEP \
++ /* DONE! */ ASM_LINE_SEP \
++ bv 0(2) ASM_LINE_SEP \
++ nop ASM_LINE_SEP \
++ Lpseudo_cancel: ASM_LINE_SEP \
++ /* store return ptr */ ASM_LINE_SEP \
++ stw %rp, -20(%sr0,%sp) ASM_LINE_SEP \
++ /* save syscall args */ ASM_LINE_SEP \
++ PUSHARGS_##args /* MACRO */ ASM_LINE_SEP \
++ STW_PIC ASM_LINE_SEP \
++ CENABLE /* FUNC CALL */ ASM_LINE_SEP \
++ ldo 64(%sp), %sp ASM_LINE_SEP \
++ ldo -64(%sp), %sp ASM_LINE_SEP \
++ LDW_PIC ASM_LINE_SEP \
++ /* restore syscall args */ ASM_LINE_SEP \
++ POPARGS_##args ASM_LINE_SEP \
++ /* save r4 in arg0 stack slot */ ASM_LINE_SEP \
++ stw %r4, -36(%sr0,%sp) ASM_LINE_SEP \
++ /* save mask from cenable */ ASM_LINE_SEP \
++ copy %ret0, %r4 ASM_LINE_SEP \
++ ble 0x100(%sr2,%r0) ASM_LINE_SEP \
++ ldi SYS_ify (syscall_name), %r20 ASM_LINE_SEP \
++ LDW_PIC ASM_LINE_SEP \
++ /* pass mask as arg0 to cdisable */ ASM_LINE_SEP \
++ copy %r4, %r26 ASM_LINE_SEP \
++ copy %ret0, %r4 ASM_LINE_SEP \
++ CDISABLE ASM_LINE_SEP \
++ ldo 64(%sp), %sp ASM_LINE_SEP \
++ ldo -64(%sp), %sp ASM_LINE_SEP \
++ LDW_PIC ASM_LINE_SEP \
++ /* compare error */ ASM_LINE_SEP \
++ ldi NO_ERROR,%r1 ASM_LINE_SEP \
++ /* branch if no error */ ASM_LINE_SEP \
++ cmpb,>>=,n %r1,%r4,Lpre_end ASM_LINE_SEP \
++ nop ASM_LINE_SEP \
++ SYSCALL_ERROR_HANDLER ASM_LINE_SEP \
++ ldo 64(%sp), %sp ASM_LINE_SEP \
++ ldo -64(%sp), %sp ASM_LINE_SEP \
++ /* No need to LDW_PIC */ ASM_LINE_SEP \
++ /* make syscall res value positive */ ASM_LINE_SEP \
++ sub %r0, %r4, %r4 ASM_LINE_SEP \
++ /* store into errno location */ ASM_LINE_SEP \
++ stw %r4, 0(%sr0,%ret0) ASM_LINE_SEP \
++ /* return -1 */ ASM_LINE_SEP \
++ ldo -1(%r0), %ret0 ASM_LINE_SEP \
++ Lpre_end: ASM_LINE_SEP \
++ ldw -20(%sr0,%sp), %rp ASM_LINE_SEP \
++ /* No need to LDW_PIC */ ASM_LINE_SEP \
++ ldw -36(%sr0,%sp), %r4 ASM_LINE_SEP
++
++/* Save arguments into our frame */
++# define PUSHARGS_0 /* nothing to do */
++# define PUSHARGS_1 PUSHARGS_0 stw %r26, -36(%sr0,%sp) ASM_LINE_SEP
++# define PUSHARGS_2 PUSHARGS_1 stw %r25, -40(%sr0,%sp) ASM_LINE_SEP
++# define PUSHARGS_3 PUSHARGS_2 stw %r24, -44(%sr0,%sp) ASM_LINE_SEP
++# define PUSHARGS_4 PUSHARGS_3 stw %r23, -48(%sr0,%sp) ASM_LINE_SEP
++# define PUSHARGS_5 PUSHARGS_4 /* Args are on the stack... */
++# define PUSHARGS_6 PUSHARGS_5
++
++/* Bring them back from the stack */
++# define POPARGS_0 /* nothing to do */
++# define POPARGS_1 POPARGS_0 ldw -36(%sr0,%sp), %r26 ASM_LINE_SEP
++# define POPARGS_2 POPARGS_1 ldw -40(%sr0,%sp), %r25 ASM_LINE_SEP
++# define POPARGS_3 POPARGS_2 ldw -44(%sr0,%sp), %r24 ASM_LINE_SEP
++# define POPARGS_4 POPARGS_3 ldw -48(%sr0,%sp), %r23 ASM_LINE_SEP
++# define POPARGS_5 POPARGS_4 ldw -52(%sr0,%sp), %r22 ASM_LINE_SEP
++# define POPARGS_6 POPARGS_5 ldw -56(%sr0,%sp), %r21 ASM_LINE_SEP
++
++# ifdef IS_IN_libpthread
++# ifdef PIC
++# define CENABLE .import __pthread_enable_asynccancel,code ASM_LINE_SEP \
++ bl __pthread_enable_asynccancel,%r2 ASM_LINE_SEP
++# define CDISABLE .import __pthread_disable_asynccancel,code ASM_LINE_SEP \
++ bl __pthread_disable_asynccancel,%r2 ASM_LINE_SEP
++# else
++# define CENABLE .import __pthread_enable_asynccancel,code ASM_LINE_SEP \
++ bl __pthread_enable_asynccancel,%r2 ASM_LINE_SEP
++# define CDISABLE .import __pthread_disable_asynccancel,code ASM_LINE_SEP \
++ bl __pthread_disable_asynccancel,%r2 ASM_LINE_SEP
++# endif
++# elif !defined NOT_IN_libc
++# ifdef PIC
++# define CENABLE .import __libc_enable_asynccancel,code ASM_LINE_SEP \
++ bl __libc_enable_asynccancel,%r2 ASM_LINE_SEP
++# define CDISABLE .import __libc_disable_asynccancel,code ASM_LINE_SEP \
++ bl __libc_disable_asynccancel,%r2 ASM_LINE_SEP
++# else
++# define CENABLE .import __libc_enable_asynccancel,code ASM_LINE_SEP \
++ bl __libc_enable_asynccancel,%r2 ASM_LINE_SEP
++# define CDISABLE .import __libc_disable_asynccancel,code ASM_LINE_SEP \
++ bl __libc_disable_asynccancel,%r2 ASM_LINE_SEP
++# endif
++# else
++# ifdef PIC
++# define CENABLE .import __librt_enable_asynccancel,code ASM_LINE_SEP \
++ bl __librt_enable_asynccancel,%r2 ASM_LINE_SEP
++# define CDISABLE .import __librt_disable_asynccancel,code ASM_LINE_SEP \
++ bl __librt_disable_asynccancel,%r2 ASM_LINE_SEP
++# else
++# define CENABLE .import __librt_enable_asynccancel,code ASM_LINE_SEP \
++ bl __librt_enable_asynccancel,%r2 ASM_LINE_SEP
++# define CDISABLE .import __librt_disable_asynccancel,code ASM_LINE_SEP \
++ bl __librt_disable_asynccancel,%r2 ASM_LINE_SEP
++# endif
++# endif
++
++/* p_header.multiple_threads is +12 from the pthread_descr struct start.
++   We could have called __get_cr27(), but we really want less overhead */
++# define MULTIPLE_THREADS_OFFSET 0xC
++
++/* cr27 has been initialized to 0x0 by kernel */
++# define NO_THREAD_CR27 0x0
++
++# ifdef IS_IN_libpthread
++# define __local_multiple_threads __pthread_multiple_threads
++# elif !defined NOT_IN_libc
++# define __local_multiple_threads __libc_multiple_threads
++# else
++# define __local_multiple_threads __librt_multiple_threads
++# endif
++
++# ifndef __ASSEMBLER__
++ extern int __local_multiple_threads attribute_hidden;
++# define SINGLE_THREAD_P __builtin_expect (__local_multiple_threads == 0, 1)
++# else
++/* This ALT version requires newer kernel support */
++# define SINGLE_THREAD_P_MFCTL \
++ mfctl %cr27, %ret0 ASM_LINE_SEP \
++ cmpib,= NO_THREAD_CR27,%ret0,Lstp ASM_LINE_SEP \
++ nop ASM_LINE_SEP \
++ ldw MULTIPLE_THREADS_OFFSET(%sr0,%ret0),%ret0 ASM_LINE_SEP \
++ Lstp: ASM_LINE_SEP
++# ifdef PIC
++/* Slower version uses GOT to get value of __local_multiple_threads */
++# define SINGLE_THREAD_P \
++ addil LT%__local_multiple_threads, %r19 ASM_LINE_SEP \
++ ldw RT%__local_multiple_threads(%sr0,%r1), %ret0 ASM_LINE_SEP \
++ ldw 0(%sr0,%ret0), %ret0 ASM_LINE_SEP
++# else
++ /* Slow non-pic version using DP */
++# define SINGLE_THREAD_P \
++ addil LR%__local_multiple_threads-$global$,%r27 ASM_LINE_SEP \
++ ldw RR%__local_multiple_threads-$global$(%sr0,%r1),%ret0 ASM_LINE_SEP
++# endif
++# endif
++#elif !defined __ASSEMBLER__
++
++/* This code should never be used but we define it anyhow. */
++# define SINGLE_THREAD_P (1)
++
++#endif
++/* !defined NOT_IN_libc || defined IS_IN_libpthread */
++
++
+--- glibc-2.3.2-orig-debian/localedata/gen-locale.sh 2003-10-22 01:03:54.000000000 -0400
++++ glibc-2.3.2/localedata/gen-locale.sh 2003-10-22 01:07:38.000000000 -0400
+@@ -47,6 +47,7 @@
+ locale=`echo $locfile|sed 's|\([^.]*\)[.].*/LC_CTYPE|\1|'`
+ charmap=`echo $locfile|sed 's|[^.]*[.]\(.*\)/LC_CTYPE|\1|'`
+
++echo "Running \"$0 $common_objpfx $localedef $locfile\""
+ echo "Generating locale $locale.$charmap: this might take a while..."
+ generate_locale `echo $charmap | sed -e s/SJIS/SHIFT_JIS/` $locale \
+ $locale.$charmap
+--- glibc-2.3.2-orig-debian/malloc/thread-m.h 2003-10-22 01:06:10.000000000 -0400
++++ glibc-2.3.2/malloc/thread-m.h 2003-10-22 01:07:38.000000000 -0400
+@@ -59,6 +59,28 @@
+ #define mutex_unlock(m) \
+ __libc_maybe_call2 (pthread_mutex_unlock, (m), (*(int *)(m) = 0))
+
++# if(defined __hppa__)
++/* Since our lock structure does not tolerate being initialized to zero, we must
++ modify the standard function calls made by malloc */
++# undef mutex_init
++# undef mutex_lock
++# undef mutex_trylock
++# undef mutex_unlock
++# define mutex_init(m) \
++ __libc_maybe_call (__pthread_mutex_init, (m, NULL), \
++ (((m)->__m_lock.__spinlock = __LT_SPINLOCK_INIT),(*(int *)(m))) )
++# define mutex_lock(m) \
++ __libc_maybe_call (__pthread_mutex_lock, (m), \
++ (__load_and_clear(&((m)->__m_lock.__spinlock)), 0))
++# define mutex_trylock(m) \
++ __libc_maybe_call (__pthread_mutex_trylock, (m), \
++ (*(int *)(m) ? 1 : (__load_and_clear(&((m)->__m_lock.__spinlock)), 0)))
++# define mutex_unlock(m) \
++ __libc_maybe_call (__pthread_mutex_unlock, (m), \
++ (((m)->__m_lock.__spinlock = __LT_SPINLOCK_INIT), (*(int *)(m))) )
++# endif
++/* if(defined __hppa__) */
++
+ #else
+
+ #define mutex_init(m) \
+--- glibc-2.3.2-orig-debian/sysdeps/generic/framestate.c 2003-10-22 01:03:47.000000000 -0400
++++ glibc-2.3.2/sysdeps/generic/framestate.c 2003-10-22 01:07:38.000000000 -0400
+@@ -41,7 +41,11 @@
+ if (handle == NULL
+ || (frame_state_for
+ = (framesf) __libc_dlsym (handle, "__frame_state_for")) == NULL)
++#ifndef __USING_SJLJ_EXCEPTIONS__
+ frame_state_for = fallback_frame_state_for;
++#else
++ frame_state_for = abort;
++#endif
+ }
+
+ return frame_state_for (pc, frame_state);
+--- glibc-2.3.2-orig-debian/sysdeps/generic/unwind-dw2.c 2003-10-22 01:06:11.000000000 -0400
++++ glibc-2.3.2/sysdeps/generic/unwind-dw2.c 2003-10-22 01:08:07.000000000 -0400
+@@ -39,7 +39,6 @@
+ #endif
+
+
+-#ifndef __USING_SJLJ_EXCEPTIONS__
+
+ #ifndef STACK_GROWS_DOWNWARD
+ #define STACK_GROWS_DOWNWARD 0
+@@ -1287,4 +1286,3 @@
+ #include "unwind.inc"
+
+ #endif /* _LIBC */
+-#endif /* !USING_SJLJ_EXCEPTIONS */
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/Dist 2003-10-22 01:03:47.000000000 -0400
++++ glibc-2.3.2/sysdeps/hppa/Dist 2003-10-22 01:07:38.000000000 -0400
+@@ -1,2 +1,3 @@
++libgcc-compat.c
+ dl-symaddr.c
+ dl-fptr.c
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/Makefile 2003-10-22 01:03:47.000000000 -0400
++++ glibc-2.3.2/sysdeps/hppa/Makefile 2003-10-22 01:07:38.000000000 -0400
+@@ -22,12 +22,19 @@
+ # CFLAGS-.os += -ffunction-sections
+ LDFLAGS-c_pic.os += -Wl,--unique=.text*
+
+-ifeq ($(subdir),malloc)
+-CFLAGS-malloc.c += -DMALLOC_ALIGNMENT=16
+-endif
+-
+ ifeq ($(subdir),elf)
+ CFLAGS-rtld.c += -mdisable-fpregs
+ dl-routines += dl-symaddr dl-fptr
+ rtld-routines += dl-symaddr dl-fptr
+ endif
++
++ifeq ($(subdir),csu)
++ifeq (yes,$(build-shared))
++# Compatibility
++ifeq (yes,$(have-protected))
++CPPFLAGS-libgcc-compat.c = -DHAVE_DOT_HIDDEN
++endif
++sysdep_routines += libgcc-compat
++shared-only-routines += libgcc-compat
++endif
++endif
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/Versions 2003-10-22 01:03:47.000000000 -0400
++++ glibc-2.3.2/sysdeps/hppa/Versions 2003-10-22 01:07:38.000000000 -0400
+@@ -5,3 +5,8 @@
+ _dl_function_address;
+ }
+ }
++libc {
++ GLIBC_2.2 {
++ __clz_tab;
++ }
++}
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/atomicity.h 1969-12-31 19:00:00.000000000 -0500
++++ glibc-2.3.2/sysdeps/hppa/atomicity.h 2003-10-22 01:07:38.000000000 -0400
+@@ -0,0 +1,55 @@
++/* Low-level functions for atomic operations. HP-PARISC version.
++ Copyright (C) 1997,2001 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, write to the Free
++ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307 USA. */
++
++#ifndef _ATOMICITY_H
++#define _ATOMICITY_H 1
++
++#include <inttypes.h>
++
++#warning stub atomicity functions are not atomic
++#warning CAO This will get implemented soon
++
++static inline int
++__attribute__ ((unused))
++exchange_and_add (volatile uint32_t *mem, int val)
++{
++ int result = *mem;
++ *mem += val;
++ return result;
++}
++
++static inline void
++__attribute__ ((unused))
++atomic_add (volatile uint32_t *mem, int val)
++{
++ *mem += val;
++}
++
++static inline int
++__attribute__ ((unused))
++compare_and_swap (volatile long int *p, long int oldval, long int newval)
++{
++ if (*p != oldval)
++ return 0;
++
++ *p = newval;
++ return 1;
++}
++
++#endif /* atomicity.h */
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/dl-fptr.c 2003-10-22 01:03:47.000000000 -0400
++++ glibc-2.3.2/sysdeps/hppa/dl-fptr.c 2003-10-22 01:07:38.000000000 -0400
+@@ -30,7 +30,7 @@
+ # include <pt-machine.h>
+
+ /* Remember, we use 0 to mean that a lock is taken on PA-RISC. */
+-static int __hppa_fptr_lock = 1;
++static __atomic_lock_t __hppa_fptr_lock = __LT_SPINLOCK_ALT_INIT;
+ #endif
+
+ /* Because ld.so is now versioned, these functions can be in their own
+@@ -127,7 +127,7 @@
+ #ifdef _LIBC_REENTRANT
+ /* Release the lock. Again, remember, zero means the lock is taken! */
+ if (mem == NULL)
+- __hppa_fptr_lock = 1;
++ __hppa_fptr_lock = __LT_SPINLOCK_INIT;
+ #endif
+
+ /* Set bit 30 to indicate to $$dyncall that this is a PLABEL. */
+@@ -180,7 +180,7 @@
+
+ #ifdef _LIBC_REENTRANT
+ /* Release the lock. */
+- __hppa_fptr_lock = 1;
++ __hppa_fptr_lock = __LT_SPINLOCK_INIT;
+ #endif
+ }
+
+@@ -190,6 +190,8 @@
+ Elf32_Addr addr = (Elf32_Addr) address;
+ struct hppa_fptr *f;
+
++ address = (void *)((unsigned long)address &~ 3); /* Clear the bottom two bits. See make_fptr. */
++
+ #ifdef _LIBC_REENTRANT
+ /* Make sure we are alone. */
+ while (testandset (&__hppa_fptr_lock));
+@@ -204,7 +206,7 @@
+
+ #ifdef _LIBC_REENTRANT
+ /* Release the lock. */
+- __hppa_fptr_lock = 1;
++ __hppa_fptr_lock = __LT_SPINLOCK_INIT;
+ #endif
+
+ return addr;
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/dl-machine.h 2003-10-22 01:06:11.000000000 -0400
++++ glibc-2.3.2/sysdeps/hppa/dl-machine.h 2003-10-22 01:10:26.000000000 -0400
+@@ -1,5 +1,5 @@
+ /* Machine-dependent ELF dynamic relocation inline functions. PA-RISC version.
+- Copyright (C) 1995-1997,1999,2000,2001,2002, 2003
++ Copyright (C) 1995-1997,1999-2003
+ Free Software Foundation, Inc.
+ Contributed by David Huggins-Daines <dhd@debian.org>
+ This file is part of the GNU C Library.
+@@ -29,8 +29,15 @@
+ #include <link.h>
+ #include <assert.h>
+
++# define VALID_ELF_OSABI(osabi) ((osabi == ELFOSABI_SYSV) || (osabi == ELFOSABI_LINUX))
++# define VALID_ELF_ABIVERSION(ver) (ver == 0)
++# define VALID_ELF_HEADER(hdr,exp,size) \
++ memcmp (hdr,exp,size-2) == 0 \
++ && VALID_ELF_OSABI (hdr[EI_OSABI]) \
++ && VALID_ELF_ABIVERSION (hdr[EI_ABIVERSION])
++
+ /* These must match the definition of the stub in bfd/elf32-hppa.c. */
+-#define SIZEOF_PLT_STUB (4*4)
++#define SIZEOF_PLT_STUB (7*4)
+ #define GOT_FROM_PLT_STUB (4*4)
+
+ /* A PLABEL is a function descriptor. Properly they consist of just
+@@ -67,45 +74,41 @@
+ return ehdr->e_machine == EM_PARISC;
+ }
+
+-
+ /* Return the link-time address of _DYNAMIC. */
+ static inline Elf32_Addr
++elf_machine_dynamic (void) __attribute__ ((const));
++
++static inline Elf32_Addr
+ elf_machine_dynamic (void)
+ {
+ Elf32_Addr dynamic;
+
+-#if 0
+- /* Use this method if GOT address not yet set up. */
+- asm (
+-" b,l 1f,%0\n"
++ asm ("b,l 1f,%0\n"
+ " depi 0,31,2,%0\n"
+ "1: addil L'_GLOBAL_OFFSET_TABLE_ - ($PIC_pcrel$0 - 8),%0\n"
+ " ldw R'_GLOBAL_OFFSET_TABLE_ - ($PIC_pcrel$0 - 12)(%%r1),%0\n"
+- : "=r" (dynamic) : : "r1");
+-#else
+- /* This works because we already have our GOT address available. */
+- dynamic = (Elf32_Addr) &_DYNAMIC;
+-#endif
++ : "=r" (dynamic) : : "r1");
+
+ return dynamic;
+ }
+
+ /* Return the run-time load address of the shared object. */
+ static inline Elf32_Addr
++elf_machine_load_address (void) __attribute__ ((const));
++
++static inline Elf32_Addr
+ elf_machine_load_address (void)
+ {
+- Elf32_Addr dynamic, dynamic_linkaddress;
++ Elf32_Addr dynamic;
+
+ asm (
+ " b,l 1f,%0\n"
+ " depi 0,31,2,%0\n"
+ "1: addil L'_DYNAMIC - ($PIC_pcrel$0 - 8),%0\n"
+-" ldo R'_DYNAMIC - ($PIC_pcrel$0 - 12)(%%r1),%1\n"
+-" addil L'_GLOBAL_OFFSET_TABLE_ - ($PIC_pcrel$0 - 16),%0\n"
+-" ldw R'_GLOBAL_OFFSET_TABLE_ - ($PIC_pcrel$0 - 20)(%%r1),%0\n"
+- : "=r" (dynamic_linkaddress), "=r" (dynamic) : : "r1");
++" ldo R'_DYNAMIC - ($PIC_pcrel$0 - 12)(%%r1),%0\n"
++ : "=r" (dynamic) : : "r1");
+
+- return dynamic - dynamic_linkaddress;
++ return dynamic - elf_machine_dynamic ();
+ }
+
+ /* Fixup a PLT entry to bounce directly to the function at VALUE. */
+@@ -168,41 +171,39 @@
+ fptr = (struct hppa_fptr *) (reloc->r_offset + l_addr);
+ if (r_sym != 0)
+ {
+- /* Relocate the pointer to the stub. */
+- fptr->func += l_addr;
+- /* Instead of the LTP value, we put the reloc offset
+- here. The trampoline code will load the proper
+- LTP and pass the reloc offset to the fixup
+- function. */
+- fptr->gp = iplt - jmprel;
+ if (!got)
+ {
+ static union {
+ unsigned char c[8];
+ Elf32_Addr i[2];
+ } sig = {{0x00,0xc0,0xff,0xee, 0xde,0xad,0xbe,0xef}};
++ const Elf32_Rela *last_rel;
++
++ last_rel = (const Elf32_Rela *) end_jmprel - 1;
++
++ /* The stub is immediately after the last .plt
++ entry. Rely on .plt relocs being ordered. */
++ if (last_rel->r_offset == 0)
++ return 0;
+
+ /* Find our .got section. It's right after the
+ stub. */
+- got = (Elf32_Addr *) (fptr->func + GOT_FROM_PLT_STUB);
++ got = (Elf32_Addr *) (last_rel->r_offset + l_addr
++ + 8 + SIZEOF_PLT_STUB);
+
+- /* Sanity check to see if the address we are
+- going to check below is within a reasonable
+- approximation of the bounds of the PLT (or,
+- at least, is at an address that won't fault
+- on read). Then check for the magic signature
+- above. */
+- if (fptr->func < (Elf32_Addr) fptr + sizeof(*fptr))
+- return 0;
+- if (fptr->func >
+- ((Elf32_Addr) fptr
+- + SIZEOF_PLT_STUB
+- + ((l->l_info[DT_PLTRELSZ]->d_un.d_val / sizeof (Elf32_Rela))
+- * 8)))
+- return 0;
++ /* Check the magic signature. */
+ if (got[-2] != sig.i[0] || got[-1] != sig.i[1])
+ return 0; /* No lazy linking for you! */
+ }
++
++ /* Relocate the pointer to the stub. */
++ fptr->func = (Elf32_Addr) got - GOT_FROM_PLT_STUB;
++
++ /* Instead of the LTP value, we put the reloc offset
++ here. The trampoline code will load the proper
++ LTP and pass the reloc offset to the fixup
++ function. */
++ fptr->gp = iplt - jmprel;
+ }
+ else
+ {
+@@ -272,22 +273,24 @@
+ " stw %r25,-40(%sp)\n" /* argc */ \
+ " stw %r24,-44(%sp)\n" /* argv */ \
+ \
+- /* We need the LTP, and we need it now. */ \
+- /* $PIC_pcrel$0 points 8 bytes past the current instruction, \
+- just like a branch reloc. This sequence gets us the runtime \
+- address of _DYNAMIC. */ \
++ /* We need the LTP, and we need it now. \
++ $PIC_pcrel$0 points 8 bytes past the current instruction, \
++ just like a branch reloc. This sequence gets us the \
++ runtime address of _DYNAMIC. */ \
+ " bl 0f,%r19\n" \
+ " depi 0,31,2,%r19\n" /* clear priviledge bits */ \
+ "0: addil L'_DYNAMIC - ($PIC_pcrel$0 - 8),%r19\n" \
+ " ldo R'_DYNAMIC - ($PIC_pcrel$0 - 12)(%r1),%r26\n" \
+ \
+- /* Also get the link time address from the first entry of the GOT. */ \
++ /* The link time address is stored in the first entry of the \
++ GOT. */ \
+ " addil L'_GLOBAL_OFFSET_TABLE_ - ($PIC_pcrel$0 - 16),%r19\n" \
+ " ldw R'_GLOBAL_OFFSET_TABLE_ - ($PIC_pcrel$0 - 20)(%r1),%r20\n" \
+ \
+ " sub %r26,%r20,%r20\n" /* Calculate load offset */ \
+ \
+- /* Rummage through the dynamic entries, looking for DT_PLTGOT. */ \
++ /* Rummage through the dynamic entries, looking for \
++ DT_PLTGOT. */ \
+ " ldw,ma 8(%r26),%r19\n" \
+ "1: cmpib,=,n 3,%r19,2f\n" /* tag == DT_PLTGOT? */ \
+ " cmpib,<>,n 0,%r19,1b\n" \
+@@ -307,8 +310,8 @@
+ | 32 bytes of magic | \
+ |---------------------------------| \
+ | 32 bytes argument/sp save area | \
+- |---------------------------------| ((current->mm->env_end) + 63 & ~63) \
+- | N bytes of slack | \
++ |---------------------------------| ((current->mm->env_end) \
++ | N bytes of slack | + 63 & ~63) \
+ |---------------------------------| \
+ | envvar and arg strings | \
+ |---------------------------------| \
+@@ -376,7 +379,7 @@
+ " bl _dl_init_internal,%r2\n" \
+ " ldo 4(%r23),%r23\n" /* delay slot */ \
+ \
+- /* Reload argc, argv to the registers start.S expects them in (feh) */ \
++ /* Reload argc, argv to the registers start.S expects. */ \
+ " ldw -40(%sp),%r25\n" \
+ " ldw -44(%sp),%r24\n" \
+ \
+@@ -388,8 +391,8 @@
+ " .word 0xdeadbeef\n" \
+ " .previous\n" \
+ \
+- /* %r3 contains a function pointer, we need to mask out the lower \
+- * bits and load the gp and jump address. */ \
++ /* %r3 contains a function pointer, we need to mask out the \
++ lower bits and load the gp and jump address. */ \
+ " depi 0,31,2,%r3\n" \
+ " ldw 0(%r3),%r2\n" \
+ " addil LT'__dl_fini_plabel,%r19\n" \
+@@ -407,43 +410,41 @@
+ Enter with r19 = reloc offset, r20 = got-8, r21 = fixup ltp. */
+ #define TRAMPOLINE_TEMPLATE(tramp_name, fixup_name) \
+ extern void tramp_name (void); \
+- asm ( "\
+- /* Trampoline for " #tramp_name " */ \n\
+- .globl " #tramp_name " \n\
+- .type " #tramp_name ",@function \n\
+-" #tramp_name ": \n\
+- /* Save return pointer */ \n\
+- stw %r2,-20(%sp) \n\
+- /* Save argument registers in the call stack frame. */ \n\
+- stw %r26,-36(%sp) \n\
+- stw %r25,-40(%sp) \n\
+- stw %r24,-44(%sp) \n\
+- stw %r23,-48(%sp) \n\
+- /* Build a call frame. */ \n\
+- stwm %sp,64(%sp) \n\
+- \n\
+- /* Set up args to fixup func. */ \n\
+- ldw 8+4(%r20),%r26 /* got[1] == struct link_map * */ \n\
+- copy %r19,%r25 /* reloc offset */ \n\
+- \n\
+- /* Call the real address resolver. */ \n\
+- bl " #fixup_name ",%r2 \n\
+- copy %r21,%r19 /* delay slot, set fixup func ltp */ \n\
+- \n\
+- ldwm -64(%sp),%sp \n\
+- /* Arguments. */ \n\
+- ldw -36(%sp),%r26 \n\
+- ldw -40(%sp),%r25 \n\
+- ldw -44(%sp),%r24 \n\
+- ldw -48(%sp),%r23 \n\
+- /* Return pointer. */ \n\
+- ldw -20(%sp),%r2 \n\
+- /* Call the real function. */ \n\
+- ldw 0(%r28),%r22 \n\
+- bv %r0(%r22) \n\
+- ldw 4(%r28),%r19 \n\
+-");
+-
++ asm (".globl " #tramp_name "\n" \
++ " .type " #tramp_name ",@function\n" \
++ #tramp_name ":\n" \
++ /* Save return pointer */ \
++ " stw %r2,-20(%sp)\n" \
++ /* Save argument registers in the call stack frame. */ \
++ " stw %r26,-36(%sp)\n" \
++ " stw %r25,-40(%sp)\n" \
++ " stw %r24,-44(%sp)\n" \
++ " stw %r23,-48(%sp)\n" \
++ /* Build a call frame, and save structure pointer. */ \
++ " stwm %r28,64(%sp)\n" \
++ \
++ /* Set up args to fixup func. */ \
++ " ldw 8+4(%r20),%r26\n" /* got[1] == struct link_map * */ \
++ " copy %r19,%r25\n" /* reloc offset */ \
++ \
++ /* Call the real address resolver. */ \
++ " bl " #fixup_name ",%r2\n" \
++ " copy %r21,%r19\n" /* delay slot, set fixup func ltp */ \
++ \
++ " ldw 0(%r28),%r22\n" /* load up the returned func ptr */ \
++ " ldw 4(%r28),%r19\n" \
++ " ldwm -64(%sp),%r28\n" \
++ /* Arguments. */ \
++ " ldw -36(%sp),%r26\n" \
++ " ldw -40(%sp),%r25\n" \
++ " ldw -44(%sp),%r24\n" \
++ " ldw -48(%sp),%r23\n" \
++ /* Call the real function. */ \
++ " bv %r0(%r22)\n" \
++ /* Return pointer. */ \
++ " ldw -20(%sp),%r2\n" \
++ );
++
+ #ifndef PROF
+ #define ELF_MACHINE_RUNTIME_TRAMPOLINE \
+ TRAMPOLINE_TEMPLATE (_dl_runtime_resolve, fixup); \
+@@ -470,16 +471,32 @@
+ /* We only use RELA. */
+ #define ELF_MACHINE_NO_REL 1
+
++/* Tell dynamic-link.h that PA needs the extra link_map argument. */
++#define ELF_MACHINE_REL_RELATIVE_NEEDSLINKMAP 1
++#define ELF_MACHINE_RELA_RELATIVE_NEEDSLINKMAP 1
++
+ /* Return the address of the entry point. */
+ #define ELF_MACHINE_START_ADDRESS(map, start) \
+ DL_FUNCTION_ADDRESS (map, start)
+
++/* We define an initialization function. It is called very early in
++ * _dl_sysdep_start. */
++#define DL_PLATFORM_INIT dl_platform_init ()
++
++static inline void __attribute__ ((unused))
++dl_platform_init (void)
++{
++ if (GL(dl_platform) != NULL && *GL(dl_platform) == '\0')
++ /* Avoid an empty string which would disturb us. */
++ GL(dl_platform) = NULL;
++}
++
+ #endif /* !dl_machine_h */
+
+ /* These are only actually used where RESOLVE_MAP is defined, anyway. */
+ #ifdef RESOLVE_MAP
+
+-static inline void
++auto void __attribute__((always_inline))
+ elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
+ const Elf32_Sym *sym, const struct r_found_version *version,
+ void *const reloc_addr_arg)
+@@ -569,15 +586,15 @@
+ probably haven't relocated the necessary values by this
+ point so we have to find them ourselves. */
+
+- asm ("bl 0f,%0 \n\
+- depi 0,31,2,%0 \n\
+-0: addil L'__boot_ldso_fptr - ($PIC_pcrel$0 - 8),%0 \n\
+- ldo R'__boot_ldso_fptr - ($PIC_pcrel$0 - 12)(%%r1),%1 \n\
+- addil L'__fptr_root - ($PIC_pcrel$0 - 16),%0 \n\
+- ldo R'__fptr_root - ($PIC_pcrel$0 - 20)(%%r1),%2 \n\
+- addil L'__fptr_count - ($PIC_pcrel$0 - 24),%0 \n\
+- ldo R'__fptr_count - ($PIC_pcrel$0 - 28)(%%r1),%3"
+- :
++ asm ("bl 0f,%0\n\t"
++ "depi 0,31,2,%0\n\t"
++ "0:\taddil L'__boot_ldso_fptr - ($PIC_pcrel$0 - 8),%0\n\t"
++ "ldo R'__boot_ldso_fptr - ($PIC_pcrel$0 - 12)(%%r1),%1\n\t"
++ "addil L'__fptr_root - ($PIC_pcrel$0 - 16),%0\n\t"
++ "ldo R'__fptr_root - ($PIC_pcrel$0 - 20)(%%r1),%2\n\t"
++ "addil L'__fptr_count - ($PIC_pcrel$0 - 24),%0\n\t"
++ "ldo R'__fptr_count - ($PIC_pcrel$0 - 28)(%%r1),%3"
++ :
+ "=r" (dot),
+ "=r" (p_boot_ldso_fptr),
+ "=r" (p_fptr_root),
+@@ -636,7 +653,7 @@
+
+ /* hppa doesn't have an R_PARISC_RELATIVE reloc, but uses relocs with
+ ELF32_R_SYM (info) == 0 for a similar purpose. */
+-static inline void
++auto void __attribute__((always_inline))
+ elf_machine_rela_relative (struct link_map *map, Elf32_Addr l_addr,
+ const Elf32_Rela *reloc,
+ void *const reloc_addr_arg)
+@@ -682,7 +699,7 @@
+ *reloc_addr = value;
+ }
+
+-static inline void
++auto void __attribute__((always_inline))
+ elf_machine_lazy_rel (struct link_map *map,
+ Elf32_Addr l_addr, const Elf32_Rela *reloc)
+ {
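+
+/* The RTLD_START assembly earlier in this file walks the _DYNAMIC
+   array by hand, comparing each tag against 3 (DT_PLTGOT).  The
+   equivalent C loop looks roughly like the sketch below; the helper
+   name is illustrative, not part of the patch.  */
+
+#include <elf.h>
+
+static Elf32_Addr
+find_pltgot (const Elf32_Dyn *dyn)
+{
+  for (; dyn->d_tag != DT_NULL; ++dyn)
+    if (dyn->d_tag == DT_PLTGOT)
+      return dyn->d_un.d_ptr;
+  return 0;	/* No PLTGOT entry found.  */
+}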
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/elf/entry.h 1969-12-31 19:00:00.000000000 -0500
++++ glibc-2.3.2/sysdeps/hppa/elf/entry.h 2003-10-22 01:07:38.000000000 -0400
+@@ -0,0 +1,10 @@
++#ifndef __ASSEMBLY__
++extern void _start (void);
++#endif
++
++/* The function's entry point is stored in the first word of the
++ function descriptor (plabel) of _start(). */
++#define ENTRY_POINT ({long int *tmp = (long int *)((long)_start & ~2); tmp[0];})
++
++/* We have to provide a special declaration. */
++#define ENTRY_POINT_DECL(class) class void _start (void);
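+
+/* ENTRY_POINT works because PA-RISC function pointers are normally
+   plabels: pointers with bit 1 set that address a two-word descriptor
+   holding the code address (word 0) and the gp value (word 1), just as
+   the trampoline above loads 0(%r28) and 4(%r28).  A sketch of that
+   unpacking; the struct and helper names are illustrative only.  */
+
+struct fdesc
+{
+  unsigned long entry;	/* Word 0: address of the code.  */
+  unsigned long gp;	/* Word 1: global pointer of the target.  */
+};
+
+static unsigned long
+entry_point_of (void (*fn) (void))
+{
+  unsigned long addr = (unsigned long) fn;
+  if (addr & 2)		/* Plabel bit set: indirect through the descriptor.  */
+    return ((struct fdesc *) (addr & ~2UL))->entry;
+  return addr;		/* Already a plain code address.  */
+}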
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/fclrexcpt.c 2003-10-22 01:03:47.000000000 -0400
++++ glibc-2.3.2/sysdeps/hppa/fpu/fclrexcpt.c 2003-10-22 01:07:38.000000000 -0400
+@@ -29,7 +29,7 @@
+ __asm__ ("fstd %%fr0,0(%1)" : "=m" (*sw) : "r" (sw));
+
+ /* Clear all the relevant bits. */
+- sw[0] &= ~(excepts & FE_ALL_EXCEPT) << 27;
++ sw[0] &= ~((excepts & FE_ALL_EXCEPT) << 27);
+ __asm__ ("fldd 0(%0),%%fr0" : : "r" (sw));
+
+ /* Success. */
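+
+/* The hunk above fixes an operator-precedence bug: '~' binds tighter
+   than '<<', so the old expression shifted the complement and, used as
+   an AND mask, wiped everything below bit 28 of the status word (trap
+   enables, rounding mode and all) instead of clearing just the
+   requested flag bits.  A small stand-alone sketch with illustrative
+   values: */
+
+#include <stdio.h>
+
+int
+main (void)
+{
+  unsigned int excepts = 0x1;	/* Stand-in for one FE_* bit.  */
+  unsigned int old_mask = ~(excepts & 0x1f) << 27;	/* 0xf0000000 - wrong.  */
+  unsigned int new_mask = ~((excepts & 0x1f) << 27);	/* 0xf7ffffff - right.  */
+  printf ("old mask %#x, new mask %#x\n", old_mask, new_mask);
+  return 0;
+}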
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/fegetenv.c 2003-10-22 01:03:47.000000000 -0400
++++ glibc-2.3.2/sysdeps/hppa/fpu/fegetenv.c 2003-10-22 01:07:38.000000000 -0400
+@@ -23,11 +23,12 @@
+ int
+ fegetenv (fenv_t *envp)
+ {
+- __asm__ (
+- "fstd %%fr0,0(%2)\n"
+- "fstd,ma %%fr1,8(%2)\n"
+- "fstd,ma %%fr2,8(%2)\n"
+- "fstd %%fr3,0(%2)\n"
+- : "=m" (*envp), "=r" (envp) : "1" (envp));
++ fenv_t *temp = envp;
++ __asm__ ( "fstd,ma %%fr0,8(%1)\n"
++ "fstd,ma %%fr1,8(%1)\n"
++ "fstd,ma %%fr2,8(%1)\n"
++ "fstd %%fr3,0(%1)\n"
++ : "=m" (*temp), "+r" (temp)
++ );
+ return 0;
+ }
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/feholdexcpt.c 2003-10-22 01:03:47.000000000 -0400
++++ glibc-2.3.2/sysdeps/hppa/fpu/feholdexcpt.c 2003-10-22 01:07:38.000000000 -0400
+@@ -25,36 +25,29 @@
+ feholdexcept (fenv_t *envp)
+ {
+ fenv_t clear;
++ fenv_t * _regs = envp;
+
+ /* Store the environment. */
+- {
+- fenv_t * _regs = envp;
+- __asm__ (
+- "fstd %%fr0,0(%2)\n"
+- "fstd,ma %%fr1,8(%2)\n"
+- "fstd,ma %%fr2,8(%2)\n"
+- "fstd %%fr3,0(%2)\n"
+- : "=m" (*_regs), "=r" (_regs) : "1" (_regs));
+- memcpy (&clear, envp, sizeof (clear));
+- }
++ __asm__ ( "fstd,ma %%fr0,8(%1)\n"
++ "fstd,ma %%fr1,8(%1)\n"
++ "fstd,ma %%fr2,8(%1)\n"
++ "fstd %%fr3,0(%1)\n"
++ : "=m" (*_regs), "+r" (_regs)
++ );
++
++ memcpy (&clear, envp, sizeof (clear));
+
+- /* Now clear all exceptions. */
+- clear.__status_word &= ~(FE_ALL_EXCEPT << 27);
++ /* Now clear all exceptions (enable bits and flags). */
++ clear.__status_word &= ~((FE_ALL_EXCEPT << 27) | FE_ALL_EXCEPT);
+ memset (clear.__exception, 0, sizeof (clear.__exception));
+
+- /* And set all exceptions to non-stop. */
+- clear.__status_word &= ~FE_ALL_EXCEPT;
+-
+ /* Load the new environment. */
+- {
+- fenv_t * _regs = &clear + 1;
+- __asm__ (
+- "fldd,mb -8(%2),%%fr3\n"
+- "fldd,mb -8(%2),%%fr2\n"
+- "fldd,mb -8(%2),%%fr1\n"
+- "fldd -8(%2),%%fr0\n"
+- : "=m" (*_regs), "=r" (_regs) : "1" (_regs));
+- }
++ __asm__ ( "fldd,ma -8(%1),%%fr3\n"
++ "fldd,ma -8(%1),%%fr2\n"
++ "fldd,ma -8(%1),%%fr1\n"
++ "fldd 0(%1),%%fr0\n"
++ : "=m" (*_regs), "+r" (_regs)
++ );
+
+ return 0;
+ }
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/fesetenv.c 2003-10-22 01:03:47.000000000 -0400
++++ glibc-2.3.2/sysdeps/hppa/fpu/fesetenv.c 2003-10-22 01:07:38.000000000 -0400
+@@ -26,24 +26,23 @@
+ fesetenv (const fenv_t *envp)
+ {
+ fenv_t temp;
++ fenv_t * _regs = &temp;
+
+ /* Install the environment specified by ENVP. But there are a few
+ values which we do not want to come from the saved environment.
+ Therefore, we get the current environment and replace the values
+ we want to use from the environment specified by the parameter. */
+- {
+- fenv_t * _regs = &temp;
+- __asm__ (
+- "fstd %%fr0,0(%2)\n"
+- "fstd,ma %%fr1,8(%2)\n"
+- "fstd,ma %%fr2,8(%2)\n"
+- "fstd %%fr3,0(%2)\n"
+- : "=m" (*_regs), "=r" (_regs) : "1" (_regs));
+- }
+
+- temp.__status_word &= ~(FE_ALL_EXCEPT
+- | (FE_ALL_EXCEPT << 27)
+- | FE_DOWNWARD);
++ __asm__ ( "fstd,ma %%fr0,8(%1)\n"
++ "fstd,ma %%fr1,8(%1)\n"
++ "fstd,ma %%fr2,8(%1)\n"
++ "fstd %%fr3,0(%1)\n"
++ : "=m" (*_regs), "+r" (_regs)
++ );
++
++ temp.__status_word &= ~((FE_ALL_EXCEPT << 27)
++ | FE_DOWNWARD
++ | FE_ALL_EXCEPT);
+ if (envp == FE_DFL_ENV)
+ ;
+ else if (envp == FE_NOMASK_ENV)
+@@ -55,16 +54,13 @@
+ | (FE_ALL_EXCEPT << 27)));
+
+ /* Load the new environment. */
+- {
+- fenv_t * _regs = &temp + 1;
+- __asm__ (
+- "fldd,mb -8(%2),%%fr3\n"
+- "fldd,mb -8(%2),%%fr2\n"
+- "fldd,mb -8(%2),%%fr1\n"
+- "fldd -8(%2),%%fr0\n"
+- : "=m" (*_regs), "=r" (_regs) : "1" (_regs));
+- }
+-
++ __asm__ ( "fldd,ma -8(%1),%%fr3\n"
++ "fldd,ma -8(%1),%%fr2\n"
++ "fldd,ma -8(%1),%%fr1\n"
++ "fldd 0(%2),%%fr0\n"
++ : "=m" (*_regs), "+r" (_regs)
++ );
++
+ /* Success. */
+ return 0;
+ }
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/feupdateenv.c 2003-10-22 01:03:47.000000000 -0400
++++ glibc-2.3.2/sysdeps/hppa/fpu/feupdateenv.c 2003-10-22 01:07:38.000000000 -0400
+@@ -27,14 +27,12 @@
+
+ /* Get the current exception status. */
+ __asm__ ("fstd %%fr0,0(%1)" : "=m" (*sw) : "r" (sw));
+- sw[0] &= (FE_ALL_EXCEPT << 27);
+-
++ sw[0] &= FE_ALL_EXCEPT;
++ envp->__status_word = envp->__status_word | sw[0];
++
+ /* Install new environment. */
+ fesetenv (envp);
+
+- /* Raise the saved exception. */
+- feraiseexcept (sw[0] >> 27);
+-
+ /* Success. */
+ return 0;
+ }
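+
+/* The hunk above ORs the currently-raised flags into the saved
+   environment before installing it, so feupdateenv itself never traps.
+   The generic C99-style sequence it replaces looks roughly like this
+   sketch, built only from portable <fenv.h> calls: */
+
+#include <fenv.h>
+
+int
+generic_feupdateenv (const fenv_t *envp)
+{
+  /* Remember which exceptions are currently raised ...  */
+  int raised = fetestexcept (FE_ALL_EXCEPT);
+  /* ... install the new environment ...  */
+  fesetenv (envp);
+  /* ... and re-raise them, which may trap if they are now unmasked.  */
+  feraiseexcept (raised);
+  return 0;
+}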
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/fraiseexcpt.c 2003-10-22 01:03:47.000000000 -0400
++++ glibc-2.3.2/sysdeps/hppa/fpu/fraiseexcpt.c 2003-10-22 01:07:38.000000000 -0400
+@@ -22,6 +22,9 @@
+ #include <float.h>
+ #include <math.h>
+
++/* Please see section 10, page 10-5, "Delayed Trapping", in the
++ PA-RISC 2.0 Architecture manual. */
++
+ int
+ feraiseexcept (int excepts)
+ {
+@@ -33,56 +36,64 @@
+
+ /* We do these bits in assembly to be certain GCC doesn't optimize
+ away something important, and so we can force delayed traps to
+- occur. */
+-
+- /* FIXME: These all need verification! */
++ occur. */
+
+- /* First: invalid exception. */
++ /* We use "fldd 0(%%sr0,%%sp),%0" to flush the delayed exception */
++
++ /* First: Invalid exception. */
+ if (excepts & FE_INVALID)
+ {
+ /* One example of a invalid operation is 0 * Infinity. */
+ double d = HUGE_VAL;
+- __asm__ __volatile__ ("fmpy,dbl %1,%%fr0,%0\n\t"
+- /* FIXME: is this a proper trap barrier? */
+- "fcpy,dbl %%fr0,%%fr0" : "=f" (d) : "0" (d));
++ __asm__ __volatile__ (
++ " fcpy,dbl %%fr0,%%fr22\n"
++ " fmpy,dbl %0,%%fr22,%0\n"
++ " fldd 0(%%sr0,%%sp),%0"
++ : "+f" (d) : : "%fr22" );
+ }
+
+- /* Next: division by zero. */
++ /* Second: Division by zero. */
+ if (excepts & FE_DIVBYZERO)
+ {
+ double d = 1.0;
+- __asm__ __volatile__ ("fdiv,dbl %1,%%fr0,%0\n\t"
+- "fcpy,dbl %%fr0,%%fr0" : "=f" (d) : "0" (d));
++ __asm__ __volatile__ (
++ " fcpy,dbl %%fr0,%%fr22\n"
++ " fdiv,dbl %0,%%fr22,%0\n"
++ " fldd 0(%%sr0,%%sp),%0"
++ : "+f" (d) : : "%fr22" );
+ }
+
+- /* Next: overflow. */
+- /* FIXME: Compare with IA-64 - do we have the same problem? */
++ /* Third: Overflow. */
+ if (excepts & FE_OVERFLOW)
+ {
+ double d = DBL_MAX;
+-
+- __asm__ __volatile__ ("fmpy,dbl %1,%1,%0\n\t"
+- "fcpy,dbl %%fr0,%%fr0" : "=f" (d) : "0" (d));
++ __asm__ __volatile__ (
++ " fadd,dbl %0,%0,%0\n"
++ " fldd 0(%%sr0,%%sp),%0"
++ : "+f" (d) );
+ }
+
+- /* Next: underflow. */
++ /* Fourth: Underflow. */
+ if (excepts & FE_UNDERFLOW)
+ {
+ double d = DBL_MIN;
+- double e = 69.69;
+-
+- __asm__ __volatile__ ("fdiv,dbl %1,%2,%0\n\t"
+- "fcpy,dbl %%fr0,%%fr0" : "=f" (d) : "0" (d), "f" (e));
++ double e = 3.0;
++ __asm__ __volatile__ (
++ " fdiv,dbl %0,%1,%0\n"
++ " fldd 0(%%sr0,%%sp),%0"
++ : "+f" (d) : "f" (e) );
+ }
+
+- /* Last: inexact. */
++ /* Fifth: Inexact. */
+ if (excepts & FE_INEXACT)
+ {
+- double d = 1.0;
+- double e = M_PI;
+-
+- __asm__ __volatile__ ("fdiv,dbl %1,%2,%0\n\t"
+- "fcpy,dbl %%fr0,%%fr0" : "=f" (d) : "0" (d), "f" (e));
++ double d = M_PI;
++ double e = 69.69;
++ __asm__ __volatile__ (
++ " fdiv,dbl %0,%1,%%fr22\n"
++ " fcnvfxt,dbl,sgl %%fr22,%%fr22L\n"
++ " fldd 0(%%sr0,%%sp),%%fr22"
++ : : "f" (d), "f" (e) : "%fr22" );
+ }
+
+ /* Success. */
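+
+/* Each branch above performs a real FP operation to set the flag and
+   then issues the dummy "fldd" so the delayed trap is delivered before
+   feraiseexcept returns.  A portable smoke test for that behaviour
+   could look like this sketch (illustrative, not part of the patch): */
+
+#include <fenv.h>
+#include <stdio.h>
+
+int
+main (void)
+{
+  const int excs[] = { FE_INVALID, FE_DIVBYZERO, FE_OVERFLOW,
+		       FE_UNDERFLOW, FE_INEXACT };
+  int i;
+  for (i = 0; i < (int) (sizeof (excs) / sizeof (excs[0])); i++)
+    {
+      feclearexcept (FE_ALL_EXCEPT);
+      feraiseexcept (excs[i]);
+      printf ("exception %#x raised: %s\n", (unsigned int) excs[i],
+	      fetestexcept (excs[i]) ? "yes" : "NO");
+    }
+  return 0;
+}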
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/fsetexcptflg.c 2003-10-22 01:03:47.000000000 -0400
++++ glibc-2.3.2/sysdeps/hppa/fpu/fsetexcptflg.c 2003-10-22 01:07:38.000000000 -0400
+@@ -29,8 +29,7 @@
+ /* Get the current status word. */
+ __asm__ ("fstd %%fr0,0(%1)" : "=m" (*sw) : "r" (sw));
+
+- /* Install the new exception flags bits. */
+- sw[0] &= ~(excepts & (FE_ALL_EXCEPT >> 27));
++ /* Clear exception flags, and install new enable trap bits. */
+ sw[0] |= (*flagp & excepts & FE_ALL_EXCEPT) << 27;
+
+ /* Store the new status word. */
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/libgcc-compat.c 1969-12-31 19:00:00.000000000 -0500
++++ glibc-2.3.2/sysdeps/hppa/libgcc-compat.c 2003-10-22 01:07:38.000000000 -0400
+@@ -0,0 +1,43 @@
++/* pre-.hidden libgcc compatibility
++ Copyright (C) 2002 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++ Contributed by Randolph Chung
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, write to the Free
++ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307 USA. */
++
++
++#include <stdint.h>
++#include <shlib-compat.h>
++
++#if SHLIB_COMPAT(libc, GLIBC_2_0, GLIBC_2_2_6)
++
++symbol_version (__clz_tab_internal, __clz_tab, GLIBC_2.2);
++
++typedef unsigned int UQItype __attribute__ ((mode (QI)));
++
++const UQItype __clz_tab_internal[] =
++{
++ 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
++ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
++ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
++ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
++ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
++ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
++ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
++ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
++};
++
++#endif
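+
+/* __clz_tab[i] is the number of significant bits in i, i.e.
+   floor(log2(i)) + 1, with 0 for i == 0.  libgcc consumes the table
+   exported above roughly as in this sketch of a 32-bit
+   count-leading-zeros helper (the function name is illustrative): */
+
+static int
+clz32 (unsigned int x)
+{
+  /* Narrow x down to the byte that holds its highest set bit.  */
+  int shift = (x >= (1u << 16))
+	      ? ((x >= (1u << 24)) ? 24 : 16)
+	      : ((x >= (1u << 8)) ? 8 : 0);
+  return 32 - (__clz_tab[x >> shift] + shift);
+}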
+--- glibc-2.3.2-orig-debian/sysdeps/hppa/sysdep.h 2003-10-22 01:03:47.000000000 -0400
++++ glibc-2.3.2/sysdeps/hppa/sysdep.h 2003-10-22 01:07:38.000000000 -0400
+@@ -70,6 +70,7 @@
+ #define PSEUDO_END(name) \
+ END (name)
+
++#undef JUMPTARGET
+ #define JUMPTARGET(name) name
+ #define SYSCALL_PIC_SETUP /* Nothing. */
+
+--- glibc-2.3.2-orig-debian/sysdeps/posix/getaddrinfo.c 2003-10-22 01:06:12.000000000 -0400
++++ glibc-2.3.2/sysdeps/posix/getaddrinfo.c 2003-10-22 01:07:38.000000000 -0400
+@@ -53,6 +53,7 @@
+ #include <sys/utsname.h>
+ #include <net/if.h>
+ #include <nsswitch.h>
++#include <stdbool.h>
+
+ #define GAIH_OKIFUNSPEC 0x0100
+ #define GAIH_EAI ~(GAIH_OKIFUNSPEC)
+--- glibc-2.3.2-orig-debian/sysdeps/unix/Makefile 2003-10-22 01:06:12.000000000 -0400
++++ glibc-2.3.2/sysdeps/unix/Makefile 2003-10-22 01:07:38.000000000 -0400
+@@ -295,6 +295,7 @@
+ $(..)sysdeps/unix/Makefile
+ $(make-target-directory)
+ (echo '#include <errno.h>'; \
++ echo 'extern long int _no_syscall(void);'; \
+ echo 'long int _no_syscall (void)'; \
+ echo '{ __set_errno (ENOSYS); return -1L; }'; \
+ for call in $(unix-stub-syscalls); do \
+--- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/hppa/clone.S 2003-10-22 01:03:48.000000000 -0400
++++ glibc-2.3.2/sysdeps/unix/sysv/linux/hppa/clone.S 2003-10-22 01:07:38.000000000 -0400
+@@ -28,6 +28,8 @@
+
+ /* int clone(int (*fn)(void *arg), void *child_stack, int flags, void *arg) */
+
++#warning CAO: __clone needs verification
++
+ .text
+ ENTRY(__clone)
+ /* FIXME: I have no idea how profiling works on hppa. */
+@@ -42,6 +44,11 @@
+ stwm %arg0,64(%arg1)
+ stw %arg3,-60(%arg1)
+
++ /* Save the PIC register. */
++#ifdef PIC
++ stw %r19,-32(%sr0, %sp) /* parent */
++#endif
++
+ /* Do the system call */
+ copy %arg2,%arg0
+ ble 0x100(%sr2,%r0)
+@@ -53,19 +60,31 @@
+
+ comib,=,n 0,%ret0,thread_start
+
+- /* Successful return from the parent */
++ /* Successful return from the parent
++ No need to restore the PIC register,
++ since we return immediately. */
++
+ bv %r0(%rp)
+ nop
+
+ /* Something bad happened -- no child created */
+ .Lerror:
++
++ /* Restore the PIC register on error */
++#ifdef PIC
++ ldw -32(%sr0, %sp), %r19 /* parent */
++#endif
++
+ b __syscall_error
+ sub %r0,%ret0,%arg0
+
+ thread_start:
++
+ /* Load up the arguments. */
+- ldw -60(%sp),%arg0
+- ldw -64(%sp),%r22
++ ldw -60(%sr0, %sp),%arg0
++ ldw -64(%sr0, %sp),%r22
++
++ /* FIXME: Don't touch the child's PIC register? */
+
+ /* Call the user's function */
+ bl $$dyncall,%r31
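+
+/* The contract implemented above: the child starts on child_stack and
+   calls fn (arg); per clone(2), the child then terminates with fn's
+   return value (that part lies beyond this hunk).  The tail of the
+   child path behaves roughly like this C sketch (hypothetical helper
+   name): */
+
+#include <unistd.h>
+
+static void
+child_trampoline (int (*fn) (void *), void *arg)
+{
+  _exit (fn (arg));	/* Never returns to the clone caller.  */
+}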
+--- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/hppa/kernel_stat.h 2003-10-22 01:03:48.000000000 -0400
++++ glibc-2.3.2/sysdeps/unix/sysv/linux/hppa/kernel_stat.h 2003-10-22 01:07:38.000000000 -0400
+@@ -1,30 +1,30 @@
+-/* definition of "struct stat" from the kernel */
++/* Definition of 'struct stat' taken from the kernel; please keep it up to date. */
+ struct kernel_stat {
+- unsigned long st_dev; /* dev_t is 32 bits on parisc */
+- unsigned long st_ino; /* 32 bits */
++ unsigned int st_dev; /* dev_t is 32 bits on parisc */
++ unsigned int st_ino; /* 32 bits */
+ unsigned short st_mode; /* 16 bits */
+ unsigned short st_nlink; /* 16 bits */
+ unsigned short st_reserved1; /* old st_uid */
+ unsigned short st_reserved2; /* old st_gid */
+- unsigned long st_rdev;
+- unsigned long st_size;
++ unsigned int st_rdev;
++ unsigned int st_size;
+ struct timespec st_atim;
+- struct timespec st_mtim;
+- struct timespec st_ctim;
+- long st_blksize;
+- long st_blocks;
+- unsigned long __unused1; /* ACL stuff */
+- unsigned long __unused2; /* network */
+- unsigned long __unused3; /* network */
+- unsigned long __unused4; /* cnodes */
++ struct timespec st_mtim;
++ struct timespec st_ctim;
++ int st_blksize;
++ int st_blocks;
++ unsigned int __unused1; /* ACL stuff */
++ unsigned int __unused2; /* network */
++ unsigned int __unused3; /* network */
++ unsigned int __unused4; /* cnodes */
+ unsigned short __unused5; /* netsite */
+ short st_fstype;
+- unsigned long st_realdev;
++ unsigned int st_realdev;
+ unsigned short st_basemode;
+ unsigned short st_spareshort;
+- unsigned long st_uid;
+- unsigned long st_gid;
+- unsigned long st_spare4[3];
++ unsigned int st_uid;
++ unsigned int st_gid;
++ unsigned int st_spare4[3];
+ };
+
+ #define _HAVE_STAT_NSEC
+--- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/hppa/mmap.c 2003-10-22 01:03:48.000000000 -0400
++++ glibc-2.3.2/sysdeps/unix/sysv/linux/hppa/mmap.c 2003-10-22 01:07:38.000000000 -0400
+@@ -37,7 +37,7 @@
+
+ __ptr_t ret;
+
+- ret = INLINE_SYSCALL(mmap, 6, addr, len, prot, flags, fd, offset);
++ ret = (__ptr_t)INLINE_SYSCALL(mmap, 6, addr, len, prot, flags, fd, offset);
+
+ /* check if it's really a negative number */
+ if(((unsigned long)ret & 0xfffff000) == 0xfffff000)
+--- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/hppa/sysdep.c 2003-10-22 01:03:48.000000000 -0400
++++ glibc-2.3.2/sysdeps/unix/sysv/linux/hppa/sysdep.c 2003-10-22 01:07:38.000000000 -0400
+@@ -19,6 +19,10 @@
+ #include <sysdep.h>
+ #include <errno.h>
+
++extern int __syscall_error(int err_no);
++extern int syscall (int sysnum, int arg0, int arg1, int arg2,
++ int arg3, int arg4, int arg5);
++
+ /* This routine is jumped to by all the syscall handlers, to stash
+ an error number into errno. */
+ int
+@@ -30,25 +34,31 @@
+
+
+ /* HPPA implements syscall() in 'C'; the assembler version would
+- typically be in syscall.S. */
+-
++ typically be in syscall.S. Also note that we have INLINE_SYSCALL,
++ INTERNAL_SYSCALL, and all the generated pure assembly syscall wrappers.
++ How often the function is used is unknown. */
+ int
+ syscall (int sysnum, int arg0, int arg1, int arg2, int arg3, int arg4, int arg5)
+ {
+- long __sys_res;
+- {
+- register unsigned long __res asm("r28");
+- LOAD_ARGS_6(arg0, arg1, arg2, arg3, arg4, arg5)
+- asm volatile ("ble 0x100(%%sr2, %%r0)\n\t"
+- "copy %1, %%r20"
+- : "=r" (__res)
+- : "r" (sysnum) ASM_ARGS_6);
+- __sys_res = __res;
+- }
+- if ((unsigned long) __sys_res >= (unsigned long)-4095)
+- {
+- __set_errno(-__sys_res);
+- __sys_res = -1;
+- }
+- return __sys_res;
++ /* FIXME: Keep this matching INLINE_SYSCALL for hppa */
++ long __sys_res;
++ {
++ register unsigned long __res asm("r28");
++ LOAD_ARGS_6(arg0, arg1, arg2, arg3, arg4, arg5)
++ asm volatile (
++ STW_ASM_PIC
++ " ble 0x100(%%sr2, %%r0) \n"
++ " copy %1, %%r20 \n"
++ LDW_ASM_PIC
++ : "=r" (__res)
++ : "r" (sysnum) ASM_ARGS_6
++ : CALL_CLOB_REGS CLOB_ARGS_6
++ );
++ __sys_res = __res;
++ }
++ if ((unsigned long) __sys_res >= (unsigned long)-4095){
++ __set_errno(-__sys_res);
++ __sys_res = -1;
++ }
++ return __sys_res;
+ }
+--- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/hppa/sysdep.h 2003-10-22 01:06:12.000000000 -0400
++++ glibc-2.3.2/sysdeps/unix/sysv/linux/hppa/sysdep.h 2003-10-22 01:07:38.000000000 -0400
+@@ -31,6 +31,26 @@
+ #undef SYS_ify
+ #define SYS_ify(syscall_name) (__NR_##syscall_name)
+
++/* Included for older kernels whose headers
++ are missing the define. */
++#ifndef __NR_semtimedop
++# define __NR_semtimedop 228
++#endif
++
++# ifdef PIC
++/* WARNING: CANNOT BE USED IN A NOP! */
++# define STW_PIC stw %r19, -32(%sr0, %sp) ASM_LINE_SEP
++# define LDW_PIC ldw -32(%sr0, %sp), %r19 ASM_LINE_SEP
++# define STW_ASM_PIC " copy %%r19, %%r4\n"
++# define LDW_ASM_PIC " copy %%r4, %%r19\n"
++# define USING_GR4 "%r4",
++# else
++# define STW_PIC ASM_LINE_SEP
++# define LDW_PIC ASM_LINE_SEP
++# define STW_ASM_PIC " \n"
++# define LDW_ASM_PIC " \n"
++# define USING_GR4
++# endif
+
+ #ifdef __ASSEMBLER__
+
+@@ -77,20 +97,13 @@
+ .text ASM_LINE_SEP \
+ .export C_SYMBOL_NAME(name) ASM_LINE_SEP \
+ .type C_SYMBOL_NAME(name),@function ASM_LINE_SEP \
+- C_LABEL(name) \
+- CALL_MCOUNT
+-
+-#define ret \
+- bv 0(2) ASM_LINE_SEP \
+- nop
+-
+-#define ret_NOERRNO \
+- bv 0(2) ASM_LINE_SEP \
+- nop
++ C_LABEL(name) ASM_LINE_SEP \
++ CALL_MCOUNT ASM_LINE_SEP
+
+ #undef END
+-#define END(name) \
+-1: .size C_SYMBOL_NAME(name),1b-C_SYMBOL_NAME(name)
++#define END(name) \
++1: ASM_LINE_SEP \
++.size C_SYMBOL_NAME(name),1b-C_SYMBOL_NAME(name) ASM_LINE_SEP \
+
+ /* If compiled for profiling, call `mcount' at the start of each function. */
+ /* No, don't bother. gcc will put the call in for us. */
+@@ -110,27 +123,83 @@
+ nop
+ */
+
+-#define PSEUDO(name, syscall_name, args) \
+- ENTRY (name) \
+- DO_CALL(syscall_name, args) ASM_LINE_SEP \
+- nop
++#define PSEUDO(name, syscall_name, args) \
++ ENTRY (name) \
++ DO_CALL(syscall_name, args) ASM_LINE_SEP \
++ nop ASM_LINE_SEP
++
++#define ret \
++ /* Return value set by ERRNO code */ ASM_LINE_SEP \
++ bv 0(2) ASM_LINE_SEP \
++ nop ASM_LINE_SEP
+
+ #undef PSEUDO_END
+-#define PSEUDO_END(name) \
++#define PSEUDO_END(name) \
+ END (name)
+
+-#define PSEUDO_NOERRNO(name, syscall_name, args) \
+- ENTRY (name) \
+- DO_CALL(syscall_name, args) ASM_LINE_SEP \
+- nop
++/* We don't set errno on return from the syscall. */
++#define PSEUDO_NOERRNO(name, syscall_name, args) \
++ ENTRY (name) \
++ DO_CALL_NOERRNO(syscall_name, args) ASM_LINE_SEP \
++ nop ASM_LINE_SEP
+
++#define ret_NOERRNO ret
++
+ #undef PSEUDO_END_NOERRNO
+-#define PSEUDO_END_NOERRNO(name) \
++#define PSEUDO_END_NOERRNO(name) \
+ END (name)
+
++/* This has to return the error value */
++#undef PSEUDO_ERRVAL
++#define PSEUDO_ERRVAL(name, syscall_name, args) \
++ ENTRY(name) \
++ DO_CALL_ERRVAL(syscall_name, args) ASM_LINE_SEP \
++ nop ASM_LINE_SEP
++
++#define ret_ERRVAL ret
++
++#undef PSEUDO_END_ERRVAL
++#define PSEUDO_END_ERRVAL(name) \
++ END(name)
++
++#undef JUMPTARGET
+ #define JUMPTARGET(name) name
+ #define SYSCALL_PIC_SETUP /* Nothing. */
+
++
++/* All the syscall assembly macros rely on finding the appropriate
++ SYSCALL_ERROR_LABEL or rather HANDLER. */
++
++/* int * __errno_location(void) so you have to store your value
++ into the return address! */
++#define DEFAULT_SYSCALL_ERROR_HANDLER \
++ .import __errno_location,code ASM_LINE_SEP \
++ /* branch to errno handler */ ASM_LINE_SEP \
++ bl __errno_location,%rp ASM_LINE_SEP
++
++/* Here are the myriad configuration options that the above can
++ work for; what we've done is provide the framework for future
++ changes to each section, if required. */
++
++#ifdef PIC
++# if RTLD_PRIVATE_ERRNO
++# define SYSCALL_ERROR_HANDLER DEFAULT_SYSCALL_ERROR_HANDLER
++# else /* !RTLD_PRIVATE_ERRNO */
++# if defined _LIBC_REENTRANT
++# define SYSCALL_ERROR_HANDLER DEFAULT_SYSCALL_ERROR_HANDLER
++# else /* !_LIBC_REENTRANT */
++# define SYSCALL_ERROR_HANDLER DEFAULT_SYSCALL_ERROR_HANDLER
++# endif /* _LIBC_REENTRANT */
++# endif /* RTLD_PRIVATE_ERRNO */
++#else
++# ifndef _LIBC_REENTRANT
++# define SYSCALL_ERROR_HANDLER DEFAULT_SYSCALL_ERROR_HANDLER
++# else
++# define SYSCALL_ERROR_HANDLER DEFAULT_SYSCALL_ERROR_HANDLER
++# endif
++#endif
++
++
+ /* Linux takes system call arguments in registers:
+ syscall number gr20
+ arg 1 gr26
+@@ -159,25 +228,61 @@
+
+ #undef DO_CALL
+ #define DO_CALL(syscall_name, args) \
+- DOARGS_##args \
++ DOARGS_##args ASM_LINE_SEP \
++ STW_PIC ASM_LINE_SEP \
++ /* Do syscall; delay slot loads # */ ASM_LINE_SEP \
+ ble 0x100(%sr2,%r0) ASM_LINE_SEP \
+ ldi SYS_ify (syscall_name), %r20 ASM_LINE_SEP \
+ ldi -0x1000,%r1 ASM_LINE_SEP \
+ cmpb,>>=,n %r1,%ret0,0f ASM_LINE_SEP \
+- stw %rp, -20(%sr0,%r30) ASM_LINE_SEP \
+- stw %ret0, -24(%sr0,%r30) ASM_LINE_SEP \
+- .import __errno_location,code ASM_LINE_SEP \
+- bl __errno_location,%rp ASM_LINE_SEP \
+- ldo 64(%r30), %r30 ASM_LINE_SEP \
+- ldo -64(%r30), %r30 ASM_LINE_SEP \
+- ldw -24(%r30), %r26 ASM_LINE_SEP \
++ /* save rp or we get lost */ ASM_LINE_SEP \
++ stw %rp, -20(%sr0,%sp) ASM_LINE_SEP \
++ /* Restore r19 from frame */ ASM_LINE_SEP \
++ LDW_PIC ASM_LINE_SEP \
++ stw %ret0, -24(%sr0,%sp) ASM_LINE_SEP \
++ SYSCALL_ERROR_HANDLER ASM_LINE_SEP \
++ /* create frame */ ASM_LINE_SEP \
++ ldo 64(%sp), %sp ASM_LINE_SEP \
++ ldo -64(%sp), %sp ASM_LINE_SEP \
++ /* OPTIMIZE: Don't reload r19 */ ASM_LINE_SEP \
++ /* do a -1*syscall_ret0 */ ASM_LINE_SEP \
++ ldw -24(%sr0,%sp), %r26 ASM_LINE_SEP \
+ sub %r0, %r26, %r26 ASM_LINE_SEP \
++ /* Store into errno location */ ASM_LINE_SEP \
+ stw %r26, 0(%sr0,%ret0) ASM_LINE_SEP \
++ /* return -1 as error */ ASM_LINE_SEP \
+ ldo -1(%r0), %ret0 ASM_LINE_SEP \
+- ldw -20(%r30), %rp ASM_LINE_SEP \
++ ldw -20(%sr0,%sp), %rp ASM_LINE_SEP \
+ 0: ASM_LINE_SEP \
++ UNDOARGS_##args ASM_LINE_SEP
++
++/* We do nothing with the return, except hand it back to someone else */
++#undef DO_CALL_NOERRNO
++#define DO_CALL_NOERRNO(syscall_name, args) \
++ DOARGS_##args \
++ /* No need to store r19 */ ASM_LINE_SEP \
++ ble 0x100(%sr2,%r0) ASM_LINE_SEP \
++ ldi SYS_ify (syscall_name), %r20 ASM_LINE_SEP \
++ /* Caller will restore r19 */ ASM_LINE_SEP \
+ UNDOARGS_##args
+
++/* Here, we return the ERRVAL in assembly; note we don't call the
++ error handler function, but we do 'negate' the return _IF_
++ it's an error. Not sure if this is the right semantics. */
++
++#undef DO_CALL_ERRVAL
++#define DO_CALL_ERRVAL(syscall_name, args) \
++ DOARGS_##args ASM_LINE_SEP \
++ /* No need to store r19 */ ASM_LINE_SEP \
++ ble 0x100(%sr2,%r0) ASM_LINE_SEP \
++ ldi SYS_ify (syscall_name), %r20 ASM_LINE_SEP \
++ /* Caller will restore r19 */ ASM_LINE_SEP \
++ ldi -0x1000,%r1 ASM_LINE_SEP \
++ cmpb,>>=,n %r1,%ret0,0f ASM_LINE_SEP \
++ sub %r0, %ret0, %ret0 ASM_LINE_SEP \
++0: ASM_LINE_SEP \
++ UNDOARGS_##args ASM_LINE_SEP
++
+ #define DOARGS_0 /* nothing */
+ #define DOARGS_1 /* nothing */
+ #define DOARGS_2 /* nothing */
+@@ -198,26 +303,87 @@
+
+ #else
+
++/* GCC has to be warned that a syscall may clobber all the ABI
++ registers listed as "caller-saves"; see page 8, Table 2,
++ in section 2.2.6 of the PA-RISC RUN-TIME architecture
++ document. However, r28 is the result and would conflict with
++ the clobber list, so it is left out. The input argument
++ registers r20 -> r26 would also conflict with the list, so they
++ are treated specially. Although r19 is clobbered by the syscall,
++ we cannot say so because it would violate the ABI; thus we say
++ r4 is clobbered and use that register to save/restore r19
++ across the syscall. */
++
++#define CALL_CLOB_REGS "%r1", "%r2", USING_GR4 \
++ "%r20", "%r29", "%r31"
++
+ #undef INLINE_SYSCALL
+-#define INLINE_SYSCALL(name, nr, args...) ({ \
++#define INLINE_SYSCALL(name, nr, args...) ({ \
++ long __sys_res; \
++ { \
++ register unsigned long __res asm("r28"); \
++ LOAD_ARGS_##nr(args) \
++ /* FIXME: HACK stw/ldw r19 around syscall */ \
++ asm volatile( \
++ STW_ASM_PIC \
++ " ble 0x100(%%sr2, %%r0)\n" \
++ " ldi %1, %%r20\n" \
++ LDW_ASM_PIC \
++ : "=r" (__res) \
++ : "i" (SYS_ify(name)) ASM_ARGS_##nr \
++ : CALL_CLOB_REGS CLOB_ARGS_##nr \
++ ); \
++ __sys_res = (long)__res; \
++ } \
++ if ( (unsigned long)__sys_res >= (unsigned long)-4095 ){ \
++ __set_errno(-__sys_res); \
++ __sys_res = -1; \
++ } \
++ __sys_res; \
++})
++
++/* INTERNAL_SYSCALL_DECL - Allows us to set up some function-static
++ value to use within the context of the syscall.
++ INTERNAL_SYSCALL_ERROR_P - Returns 0 if it wasn't an error, 1 otherwise.
++ You are allowed to use the syscall result (val) and the DECL error variable
++ to determine what went wrong.
++ INTERNAL_SYSCALL_ERRNO - Munges the val/err pair into the error number.
++ In our case we just flip the sign. */
++
++#undef INTERNAL_SYSCALL_DECL
++#define INTERNAL_SYSCALL_DECL(err) do { } while (0)
++
++/* Equivalent to (val < 0) && (val >= -4095), which is what we want. */
++#undef INTERNAL_SYSCALL_ERROR_P
++#define INTERNAL_SYSCALL_ERROR_P(val, err) \
++ ((unsigned long)val >= (unsigned long)-4095)
++
++#undef INTERNAL_SYSCALL_ERRNO
++#define INTERNAL_SYSCALL_ERRNO(val, err) (-(val))
++
++/* Similar to INLINE_SYSCALL but we don't set errno */
++#undef INTERNAL_SYSCALL
++#define INTERNAL_SYSCALL(name, err, nr, args...) \
++({ \
+ long __sys_res; \
+ { \
+ register unsigned long __res asm("r28"); \
+ LOAD_ARGS_##nr(args) \
++ /* FIXME: HACK stw/ldw r19 around syscall */ \
+ asm volatile( \
+- "ble 0x100(%%sr2, %%r0)\n\t" \
+- " ldi %1, %%r20" \
++ STW_ASM_PIC \
++ " ble 0x100(%%sr2, %%r0)\n" \
++ " ldi %1, %%r20\n" \
++ LDW_ASM_PIC \
+ : "=r" (__res) \
+ : "i" (SYS_ify(name)) ASM_ARGS_##nr \
+- ); \
+- __sys_res = __res; \
+- } \
+- if ((unsigned long)__sys_res >= (unsigned long)-4095) { \
+- __set_errno(-__sys_res); \
+- __sys_res = -1; \
++ : CALL_CLOB_REGS CLOB_ARGS_##nr \
++ ); \
++ __sys_res = (long)__res; \
+ } \
+ __sys_res; \
+-})
++ })
++
+
+ #define LOAD_ARGS_0()
+ #define LOAD_ARGS_1(r26) \
+@@ -239,12 +405,22 @@
+ register unsigned long __r21 __asm__("r21") = (unsigned long)r21; \
+ LOAD_ARGS_5(r26,r25,r24,r23,r22)
+
+-#define ASM_ARGS_0
+-#define ASM_ARGS_1 , "r" (__r26)
+-#define ASM_ARGS_2 , "r" (__r26), "r" (__r25)
+-#define ASM_ARGS_3 , "r" (__r26), "r" (__r25), "r" (__r24)
+-#define ASM_ARGS_4 , "r" (__r26), "r" (__r25), "r" (__r24), "r" (__r23)
+-#define ASM_ARGS_5 , "r" (__r26), "r" (__r25), "r" (__r24), "r" (__r23), "r" (__r22)
+-#define ASM_ARGS_6 , "r" (__r26), "r" (__r25), "r" (__r24), "r" (__r23), "r" (__r22), "r" (__r21)
+-
++/* Even with zero args we use r20 for the syscall number */
++#define ASM_ARGS_0
++#define ASM_ARGS_1 ASM_ARGS_0, "r" (__r26)
++#define ASM_ARGS_2 ASM_ARGS_1, "r" (__r25)
++#define ASM_ARGS_3 ASM_ARGS_2, "r" (__r24)
++#define ASM_ARGS_4 ASM_ARGS_3, "r" (__r23)
++#define ASM_ARGS_5 ASM_ARGS_4, "r" (__r22)
++#define ASM_ARGS_6 ASM_ARGS_5, "r" (__r21)
++
++/* The registers not listed as inputs but clobbered */
++#define CLOB_ARGS_6
++#define CLOB_ARGS_5 CLOB_ARGS_6, "%r21"
++#define CLOB_ARGS_4 CLOB_ARGS_5, "%r22"
++#define CLOB_ARGS_3 CLOB_ARGS_4, "%r23"
++#define CLOB_ARGS_2 CLOB_ARGS_3, "%r24"
++#define CLOB_ARGS_1 CLOB_ARGS_2, "%r25"
++#define CLOB_ARGS_0 CLOB_ARGS_1, "%r26"
++
+ #endif /* __ASSEMBLER__ */
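+
+/* The -4095 threshold used throughout this file reflects the Linux
+   convention that a failing syscall returns -errno in [-4095, -1];
+   reinterpreted as unsigned, exactly those values compare >=
+   (unsigned long) -4095.  The resulting C-level check, sketched with a
+   hypothetical helper name: */
+
+#include <errno.h>
+
+static long
+syscall_result (unsigned long ret)
+{
+  if (ret >= (unsigned long) -4095)
+    {
+      errno = -(long) ret;	/* Recover the positive errno value.  */
+      return -1;
+    }
+  return (long) ret;		/* A genuine (possibly large) result.  */
+}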
+--- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/hppa/umount.c 2003-10-22 01:03:48.000000000 -0400
++++ glibc-2.3.2/sysdeps/unix/sysv/linux/hppa/umount.c 2003-10-22 01:07:38.000000000 -0400
+@@ -21,6 +21,7 @@
+ does down here. */
+
+ extern long int __umount2 (const char *name, int flags);
++extern long int __umount (const char * name);
+
+ long int
+ __umount (const char *name)
+--- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/sys/sysctl.h 2003-10-22 01:06:13.000000000 -0400
++++ glibc-2.3.2/sysdeps/unix/sysv/linux/sys/sysctl.h 2003-10-22 01:07:38.000000000 -0400
+@@ -24,7 +24,9 @@
+ #include <stddef.h>
+ /* Prevent more kernel headers than necessary to be included. */
+ #define _LINUX_KERNEL_H 1
+-#define _LINUX_TYPES_H 1
++#ifndef _LINUX_TYPES_H
++# define _LINUX_TYPES_H 1
++#endif
+ #define _LINUX_LIST_H 1
+ /* We do need this one for the declarations in <linux/sysctl.h>,
+ since we've elided the inclusion of <linux/kernel.h> that gets them. */