NPTL support on ia32

From: Arun Sharma <arun.sharma_at_intel.com>
Date: 2003-10-18 10:36:30
The attached patch lets you run NPTL-based IA-32 threaded programs on
Itanium. Specifically, it:

- adds the new NPTL-related system calls to the IA-32 syscall table (see
  the user-space sketch after this list)
- adds support for a per-CPU GDT, just as i386 does
- adds support for the new clone()-related flags (CLONE_SETTLS and the
  parent/child TID pointers)
- fixes clobbering of %ebp (r13) for IA-32 tasks (see process.c)

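For reference, a minimal user-space sketch along these lines should exercise
the new set_thread_area/get_thread_area entries under the emulation layer.
It is illustrative only and not part of the patch: it hardcodes the i386
syscall numbers 243 and 244 and mirrors the ia32_user_desc layout.

/* tls_test.c -- build as an IA-32 binary, run under the emulation layer */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct user_desc {			/* mirrors struct ia32_user_desc (i386 ABI) */
	unsigned int entry_number;
	unsigned int base_addr;
	unsigned int limit;
	unsigned int seg_32bit:1;
	unsigned int contents:2;
	unsigned int read_exec_only:1;
	unsigned int limit_in_pages:1;
	unsigned int seg_not_present:1;
	unsigned int useable:1;
};

static unsigned int tls_block[1024];	/* stand-in TCB; slot 0 holds a magic value */

int main (void)
{
	struct user_desc desc, check;
	unsigned int sel, value;

	tls_block[0] = 0xdeadbeef;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = (unsigned int) -1;	/* ask the kernel for a free TLS slot */
	desc.base_addr = (unsigned int) tls_block;
	desc.limit = 0xfffff;
	desc.seg_32bit = 1;
	desc.limit_in_pages = 1;
	desc.useable = 1;

	if (syscall(243, &desc) < 0) {		/* __NR_set_thread_area on i386 */
		perror("set_thread_area");
		return 1;
	}

	/* load %gs with the new selector (index*8 | RPL 3) and read through it */
	sel = (desc.entry_number << 3) | 3;
	asm volatile ("movw %w0, %%gs" : : "q" (sel));
	asm volatile ("movl %%gs:0, %0" : "=r" (value));
	printf("entry %u, %%gs:0 = %#x\n", desc.entry_number, value);

	memset(&check, 0, sizeof(check));
	check.entry_number = desc.entry_number;
	if (syscall(244, &check) < 0) {		/* __NR_get_thread_area on i386 */
		perror("get_thread_area");
		return 1;
	}
	printf("get_thread_area: base=%#x limit=%#x\n", check.base_addr, check.limit);
	return 0;
}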

Please review and let me know if you have any comments.
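
For an end-to-end check, an ordinary NPTL pthread program with __thread
data (built as an IA-32 binary against an NPTL-enabled glibc and copied
over) goes through the CLONE_SETTLS path as well. Again, this is only an
illustrative smoke test, not part of the patch:

/* pthread_tls_test.c -- per-thread __thread data under NPTL */
#include <pthread.h>
#include <stdio.h>

/* __thread makes glibc/NPTL set up a TLS segment: set_thread_area for the
 * initial thread, CLONE_SETTLS for threads created via clone(). */
static __thread int tls_value = 1;

static void *
worker (void *arg)
{
	tls_value += (int) (long) arg;		/* touches this thread's copy only */
	printf("thread %ld: tls_value = %d\n", (long) arg, tls_value);
	return NULL;
}

int main (void)
{
	pthread_t t[2];
	long i;

	for (i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, (void *) (i + 10));
	for (i = 0; i < 2; i++)
		pthread_join(t[i], NULL);

	printf("main: tls_value = %d (should still be 1)\n", tls_value);
	return 0;
}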

    -Arun


Index: linux-2.6/include/asm-ia64/ia32.h
===================================================================
--- linux-2.6/include/asm-ia64/ia32.h	(revision 14111)
+++ linux-2.6/include/asm-ia64/ia32.h	(working copy)
@@ -9,9 +9,11 @@
 #ifdef CONFIG_IA32_SUPPORT
 
 extern void ia32_cpu_init (void);
+extern void ia32_boot_gdt_init (void);
 extern void ia32_gdt_init (void);
 extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
 extern int ia32_intercept (struct pt_regs *regs, unsigned long isr);
+extern int ia32_clone_tls(struct task_struct *child, struct pt_regs *childregs);
 
 #endif /* !CONFIG_IA32_SUPPORT */
 
Index: linux-2.6/include/asm-ia64/processor.h
===================================================================
--- linux-2.6/include/asm-ia64/processor.h	(revision 14111)
+++ linux-2.6/include/asm-ia64/processor.h	(working copy)
@@ -230,6 +230,25 @@
 		 (int *) (addr));								\
 })
 
+#ifdef CONFIG_IA32_SUPPORT
+struct desc_struct {
+	unsigned int a,b;
+};
+
+#define desc_empty(desc) \
+		(!((desc)->a + (desc)->b))
+
+#define desc_equal(desc1, desc2) \
+		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+
+#define GDT_ENTRY_TLS_ENTRIES	3
+#define GDT_ENTRY_TLS_MIN	6
+#define GDT_ENTRY_TLS_MAX 	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+#endif
+
 struct thread_struct {
 	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
 	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
@@ -249,6 +268,9 @@
 	__u64 fdr;			/* IA32 fp except. data reg */
 	__u64 old_k1;			/* old value of ar.k1 */
 	__u64 old_iob;			/* old IOBase value */
+	/* cached TLS descriptors. */
+	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+
 # define INIT_THREAD_IA32	.eflag =	0,			\
 				.fsr =		0,			\
 				.fcr =		0x17800000037fULL,	\
Index: linux-2.6/arch/ia64/kernel/process.c
===================================================================
--- linux-2.6/arch/ia64/kernel/process.c	(revision 14111)
+++ linux-2.6/arch/ia64/kernel/process.c	(working copy)
@@ -36,6 +36,10 @@
 # include <asm/perfmon.h>
 #endif
 
+#ifdef CONFIG_IA32_SUPPORT
+#include <asm/ia32.h>
+#endif
+
 #include "sigframe.h"
 
 void (*ia64_mark_idle)(int);
@@ -324,7 +328,8 @@
 	memcpy((void *) child_rbs, (void *) rbs, rbs_size);
 
 	if (user_mode(child_ptregs)) {
-		if (clone_flags & CLONE_SETTLS)
+		if ((clone_flags & CLONE_SETTLS) && (!IS_IA32_PROCESS(regs)))
+
 			child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
 		if (user_stack_base) {
 			child_ptregs->r12 = user_stack_base + user_stack_size - 16;
@@ -383,8 +388,11 @@
 	 * If we're cloning an IA32 task then save the IA32 extra
 	 * state from the current task to the new task
 	 */
-	if (IS_IA32_PROCESS(ia64_task_regs(current)))
+	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
 		ia32_save_state(p);
+		if (clone_flags & CLONE_SETTLS)
+			retval = ia32_clone_tls(p, child_ptregs);
+	}
 #endif
 
 #ifdef CONFIG_PERFMON
Index: linux-2.6/arch/ia64/kernel/smpboot.c
===================================================================
--- linux-2.6/arch/ia64/kernel/smpboot.c	(revision 14111)
+++ linux-2.6/arch/ia64/kernel/smpboot.c	(working copy)
@@ -47,6 +47,9 @@
 #include <asm/sal.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
+#ifdef CONFIG_IA32_SUPPORT
+#include <asm/ia32.h>
+#endif
 
 #define SMP_DEBUG 0
 
@@ -312,6 +315,9 @@
 	local_irq_enable();
 	calibrate_delay();
 	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
+#ifdef CONFIG_IA32_SUPPORT
+	ia32_gdt_init();
+#endif
 
 	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
 		/*
Index: linux-2.6/arch/ia64/mm/init.c
===================================================================
--- linux-2.6/arch/ia64/mm/init.c	(revision 14111)
+++ linux-2.6/arch/ia64/mm/init.c	(working copy)
@@ -555,6 +555,6 @@
 	setup_gate();	/* setup gate pages before we free up boot memory... */
 
 #ifdef CONFIG_IA32_SUPPORT
-	ia32_gdt_init();
+	ia32_boot_gdt_init();
 #endif
 }
Index: linux-2.6/arch/ia64/ia32/ia32_entry.S
===================================================================
--- linux-2.6/arch/ia64/ia32/ia32_entry.S	(revision 14111)
+++ linux-2.6/arch/ia64/ia32/ia32_entry.S	(working copy)
@@ -32,7 +32,7 @@
 
 ENTRY(ia32_clone)
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
-	alloc r16=ar.pfs,2,2,4,0
+	alloc r16=ar.pfs,5,2,6,0
 	DO_SAVE_SWITCH_STACK
 	mov loc0=rp
 	mov loc1=r16				// save ar.pfs across do_fork
@@ -41,6 +41,8 @@
 	mov out3=16				// stacksize (compensates for 16-byte scratch area)
 	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
 	dep out0=0,in0,CLONE_IDLETASK_BIT,1	// out0 = clone_flags & ~CLONE_IDLETASK
+	zxt4 out4=in2				// out4 = parent_tidptr
+	zxt4 out5=in4				// out5 = child_tidptr
 	br.call.sptk.many rp=do_fork
 .ret0:	.restore sp
 	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
@@ -424,13 +426,13 @@
 	data8 sys_ni_syscall	/* 235 */
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall
+	data8 sys_tkill
 	data8 sys_ni_syscall
-	data8 sys_ni_syscall
 	data8 compat_sys_futex	/* 240 */
 	data8 compat_sys_sched_setaffinity
 	data8 compat_sys_sched_getaffinity
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall
+	data8 sys32_set_thread_area
+	data8 sys32_get_thread_area
 	data8 sys_ni_syscall	/* 245 */
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall
@@ -438,14 +440,14 @@
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall	/* 250 */
 	data8 sys_ni_syscall
+	data8 sys_exit_group
 	data8 sys_ni_syscall
-	data8 sys_ni_syscall
 	data8 sys_epoll_create
 	data8 sys32_epoll_ctl	/* 255 */
 	data8 sys32_epoll_wait
+	data8 sys_remap_file_pages
+	data8 sys_set_tid_address
 	data8 sys_ni_syscall
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall
 	data8 sys_ni_syscall	/* 260 */
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall
Index: linux-2.6/arch/ia64/ia32/ia32_ldt.c
===================================================================
--- linux-2.6/arch/ia64/ia32/ia32_ldt.c	(revision 14111)
+++ linux-2.6/arch/ia64/ia32/ia32_ldt.c	(working copy)
@@ -82,7 +82,7 @@
 static int
 write_ldt (void * ptr, unsigned long bytecount, int oldmode)
 {
-	struct ia32_modify_ldt_ldt_s ldt_info;
+	struct ia32_user_desc ldt_info;
 	__u64 entry;
 	int ret;
 
Index: linux-2.6/arch/ia64/ia32/ia32priv.h
===================================================================
--- linux-2.6/arch/ia64/ia32/ia32priv.h	(revision 14111)
+++ linux-2.6/arch/ia64/ia32/ia32priv.h	(working copy)
@@ -327,15 +327,30 @@
 #define __USER_CS      0x23
 #define __USER_DS      0x2B
 
-#define FIRST_TSS_ENTRY 6
-#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
-#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
-#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
+/*
+ * The per-cpu GDT has 32 entries: see <asm-i386/segment.h>
+ */
+#define GDT_ENTRIES 32
 
+#define GDT_SIZE (GDT_ENTRIES * 8)
+
+#define GDT_ENTRY_TLS_ENTRIES	3
+#define GDT_ENTRY_TLS_MIN	6
+#define GDT_ENTRY_TLS_MAX 	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+#define TSS_ENTRY 14
+#define LDT_ENTRY (TSS_ENTRY+1)
+
+
 #define IA32_SEGSEL_RPL		(0x3 << 0)
 #define IA32_SEGSEL_TI		(0x1 << 2)
 #define IA32_SEGSEL_INDEX_SHIFT	3
 
+#define _TSS ((unsigned long) TSS_ENTRY << IA32_SEGSEL_INDEX_SHIFT)	
+#define _LDT ((unsigned long) LDT_ENTRY << IA32_SEGSEL_INDEX_SHIFT)	
+
 #define IA32_SEG_BASE		16
 #define IA32_SEG_TYPE		40
 #define IA32_SEG_SYS		44
@@ -419,7 +434,41 @@
 #define IA32_LDT_ENTRIES	8192		/* Maximum number of LDT entries supported. */
 #define IA32_LDT_ENTRY_SIZE	8		/* The size of each LDT entry. */
 
-struct ia32_modify_ldt_ldt_s {
+#define LDT_entry_a(info) \
+	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+
+#define LDT_entry_b(info) \
+	(((info)->base_addr & 0xff000000) | \
+	(((info)->base_addr & 0x00ff0000) >> 16) | \
+	((info)->limit & 0xf0000) | \
+	(((info)->read_exec_only ^ 1) << 9) | \
+	((info)->contents << 10) | \
+	(((info)->seg_not_present ^ 1) << 15) | \
+	((info)->seg_32bit << 22) | \
+	((info)->limit_in_pages << 23) | \
+	((info)->useable << 20) | \
+	0x7100)
+
+#define LDT_empty(info) (\
+	(info)->base_addr	== 0	&& \
+	(info)->limit		== 0	&& \
+	(info)->contents	== 0	&& \
+	(info)->read_exec_only	== 1	&& \
+	(info)->seg_32bit	== 0	&& \
+	(info)->limit_in_pages	== 0	&& \
+	(info)->seg_not_present	== 1	&& \
+	(info)->useable		== 0	)
+
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+{
+	extern struct desc_struct *cpu_gdt_table[NR_CPUS];
+
+#define C(i) cpu_gdt_table[cpu][GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+	C(0); C(1); C(2);
+#undef C
+}
+
+struct ia32_user_desc {
 	unsigned int entry_number;
 	unsigned int base_addr;
 	unsigned int limit;
Index: linux-2.6/arch/ia64/ia32/binfmt_elf32.c
===================================================================
--- linux-2.6/arch/ia64/ia32/binfmt_elf32.c	(revision 14111)
+++ linux-2.6/arch/ia64/ia32/binfmt_elf32.c	(working copy)
@@ -62,7 +62,7 @@
 struct page *
 ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int no_share)
 {
-	struct page *pg = ia32_shared_page[(address - vma->vm_start)/PAGE_SIZE];
+	struct page *pg = ia32_shared_page[ smp_processor_id() + (address - vma->vm_start)/PAGE_SIZE];
 
 	get_page(pg);
 	return pg;
Index: linux-2.6/arch/ia64/ia32/ia32_support.c
===================================================================
--- linux-2.6/arch/ia64/ia32/ia32_support.c	(revision 14111)
+++ linux-2.6/arch/ia64/ia32/ia32_support.c	(working copy)
@@ -23,14 +23,16 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>
 #include <asm/processor.h>
+#include <asm/uaccess.h>
 
 #include "ia32priv.h"
 
 extern void die_if_kernel (char *str, struct pt_regs *regs, long err);
 
 struct exec_domain ia32_exec_domain;
-struct page *ia32_shared_page[(2*IA32_PAGE_SIZE + PAGE_SIZE - 1)/PAGE_SIZE];
-unsigned long *ia32_gdt;
+struct page *ia32_shared_page[(PAGE_ALIGN(IA32_PAGE_SIZE)/PAGE_SIZE) * NR_CPUS];
+unsigned long *ia32_boot_gdt;
+unsigned long *cpu_gdt_table[NR_CPUS];
 
 static unsigned long
 load_desc (u16 selector)
@@ -43,8 +45,8 @@
 		table = (unsigned long *) IA32_LDT_OFFSET;
 		limit = IA32_LDT_ENTRIES;
 	} else {
-		table = ia32_gdt;
-		limit = IA32_PAGE_SIZE / sizeof(ia32_gdt[0]);
+		table = cpu_gdt_table[smp_processor_id()];
+		limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]);
 	}
 	index = selector >> IA32_SEGSEL_INDEX_SHIFT;
 	if (index >= limit)
@@ -66,6 +68,35 @@
 	regs->ar_ssd = load_desc(regs->r17 >> 16);	/* SSD */
 }
 
+int
+ia32_clone_tls(struct task_struct *child, struct pt_regs *childregs)
+{
+	struct desc_struct *desc;
+	struct ia32_user_desc info;
+	int idx;
+
+	if (copy_from_user(&info, (void *)(childregs->r14 & 0xffffffff), 
+			   sizeof(info)))
+		return -EFAULT;
+	if (LDT_empty(&info))
+		return -EINVAL;
+
+	idx = info.entry_number;
+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+		return -EINVAL;
+
+	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+	desc->a = LDT_entry_a(&info);
+	desc->b = LDT_entry_b(&info);
+
+	/* XXX: can this be done in a cleaner way ? */
+	load_TLS(&child->thread, smp_processor_id());
+	ia32_load_segment_descriptors(child);
+	load_TLS(&current->thread, smp_processor_id());
+
+	return 0;
+}
+
 void
 ia32_save_state (struct task_struct *t)
 {
@@ -76,6 +107,7 @@
 	t->thread.fdr   = ia64_getreg(_IA64_REG_AR_FDR);
 	ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
 	ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
+
 }
 
 void
@@ -83,14 +115,13 @@
 {
 	unsigned long eflag, fsr, fcr, fir, fdr, tssd;
 	struct pt_regs *regs = ia64_task_regs(t);
-	int nr = get_cpu();	/* LDT and TSS depend on CPU number: */
 
 	eflag = t->thread.eflag;
 	fsr = t->thread.fsr;
 	fcr = t->thread.fcr;
 	fir = t->thread.fir;
 	fdr = t->thread.fdr;
-	tssd = load_desc(_TSS(nr));					/* TSSD */
+	tssd = load_desc(_TSS);					/* TSSD */
 
 	ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
 	ia64_setreg(_IA64_REG_AR_FSR, fsr);
@@ -102,8 +133,10 @@
 	ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
 	ia64_set_kr(IA64_KR_TSSD, tssd);
 
-	regs->r17 = (_TSS(nr) << 48) | (_LDT(nr) << 32) | (__u32) regs->r17;
-	regs->r30 = load_desc(_LDT(nr));				/* LDTD */
+	regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17;
+	regs->r30 = load_desc(_LDT);				/* LDTD */
+	load_TLS(&t->thread, smp_processor_id());
+
 	put_cpu();
 }
 
@@ -113,37 +146,43 @@
 void
 ia32_gdt_init (void)
 {
-	unsigned long *tss;
+	int cpu = smp_processor_id();
+
+	ia32_shared_page[cpu] = alloc_page(GFP_KERNEL);
+	cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]);
+
+	/* Copy from the boot cpu's GDT */
+	memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE);
+}
+
+
+/*
+ * Setup IA32 GDT and TSS
+ */
+void
+ia32_boot_gdt_init (void)
+{
 	unsigned long ldt_size;
-	int nr;
 
 	ia32_shared_page[0] = alloc_page(GFP_KERNEL);
-	ia32_gdt = page_address(ia32_shared_page[0]);
-	tss = ia32_gdt + IA32_PAGE_SIZE/sizeof(ia32_gdt[0]);
+	ia32_boot_gdt = page_address(ia32_shared_page[0]);
+	cpu_gdt_table[0] = ia32_boot_gdt;
 
-	if (IA32_PAGE_SIZE == PAGE_SIZE) {
-		ia32_shared_page[1] = alloc_page(GFP_KERNEL);
-		tss = page_address(ia32_shared_page[1]);
-	}
-
 	/* CS descriptor in IA-32 (scrambled) format */
-	ia32_gdt[__USER_CS >> 3] = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
+	ia32_boot_gdt[__USER_CS >> 3] = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
 						       0xb, 1, 3, 1, 1, 1, 1);
 
 	/* DS descriptor in IA-32 (scrambled) format */
-	ia32_gdt[__USER_DS >> 3] = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
+	ia32_boot_gdt[__USER_DS >> 3] = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
 						       0x3, 1, 3, 1, 1, 1, 1);
 
-	/* We never change the TSS and LDT descriptors, so we can share them across all CPUs.  */
 	ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
-	for (nr = 0; nr < NR_CPUS; ++nr) {
-		ia32_gdt[_TSS(nr) >> IA32_SEGSEL_INDEX_SHIFT]
-			= IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
-					      0xb, 0, 3, 1, 1, 1, 0);
-		ia32_gdt[_LDT(nr) >> IA32_SEGSEL_INDEX_SHIFT]
-			= IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
-					      0x2, 0, 3, 1, 1, 1, 0);
-	}
+	ia32_boot_gdt[TSS_ENTRY]
+		= IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
+				      0xb, 0, 3, 1, 1, 1, 0);
+	ia32_boot_gdt[LDT_ENTRY]	
+		= IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
+				      0x2, 0, 3, 1, 1, 1, 0);
 }
 
 /*
Index: linux-2.6/arch/ia64/ia32/sys_ia32.c
===================================================================
--- linux-2.6/arch/ia64/ia32/sys_ia32.c	(revision 14111)
+++ linux-2.6/arch/ia64/ia32/sys_ia32.c	(working copy)
@@ -2817,6 +2817,116 @@
 	return numevents;
 }
 
+/*
+ * Get a yet unused TLS descriptor index.
+ */
+static int get_free_idx(void)
+{
+	struct thread_struct *t = &current->thread;
+	int idx;
+
+	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+		if (desc_empty(t->tls_array + idx))
+			return idx + GDT_ENTRY_TLS_MIN;
+	return -ESRCH;
+}
+
+/*
+ * Set a given TLS descriptor:
+ */
+asmlinkage int
+sys32_set_thread_area(struct ia32_user_desc *u_info)
+{
+	struct thread_struct *t = &current->thread;
+	struct ia32_user_desc info;
+	struct desc_struct *desc;
+	int cpu, idx;
+
+	if (copy_from_user(&info, u_info, sizeof(info)))
+		return -EFAULT;
+	idx = info.entry_number;
+
+	/*
+	 * index -1 means the kernel should try to find and
+	 * allocate an empty descriptor:
+	 */
+	if (idx == -1) {
+		idx = get_free_idx();
+		if (idx < 0)
+			return idx;
+		if (put_user(idx, &u_info->entry_number))
+			return -EFAULT;
+	}
+
+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+		return -EINVAL;
+
+	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+	cpu = get_cpu();	/* no preemption while we touch the TLS array/GDT */
+
+	if (LDT_empty(&info)) {
+		desc->a = 0;
+		desc->b = 0;
+	} else {
+		desc->a = LDT_entry_a(&info);
+		desc->b = LDT_entry_b(&info);
+	}
+	load_TLS(t, cpu);
+	put_cpu();
+
+	return 0;
+}
+
+/*
+ * Get the current Thread-Local Storage area:
+ */
+
+#define GET_BASE(desc) ( \
+	(((desc)->a >> 16) & 0x0000ffff) | \
+	(((desc)->b << 16) & 0x00ff0000) | \
+	( (desc)->b        & 0xff000000)   )
+
+#define GET_LIMIT(desc) ( \
+	((desc)->a & 0x0ffff) | \
+	 ((desc)->b & 0xf0000) )
+	
+#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
+#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
+#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
+#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
+#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
+#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
+
+asmlinkage int
+sys32_get_thread_area(struct ia32_user_desc *u_info)
+{
+	struct ia32_user_desc info;
+	struct desc_struct *desc;
+	int idx;
+
+	if (get_user(idx, &u_info->entry_number))
+		return -EFAULT;
+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+		return -EINVAL;
+
+	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+	info.entry_number = idx;
+	info.base_addr = GET_BASE(desc);
+	info.limit = GET_LIMIT(desc);
+	info.seg_32bit = GET_32BIT(desc);
+	info.contents = GET_CONTENTS(desc);
+	info.read_exec_only = !GET_WRITABLE(desc);
+	info.limit_in_pages = GET_LIMIT_PAGES(desc);
+	info.seg_not_present = !GET_PRESENT(desc);
+	info.useable = GET_USEABLE(desc);
+
+	if (copy_to_user(u_info, &info, sizeof(info)))
+		return -EFAULT;
+	return 0;
+}
+
 #ifdef	NOTYET  /* UNTESTED FOR IA64 FROM HERE DOWN */
 
 struct ncp_mount_data32 {
