[PATCH] 80-column reformatting for fsys.S

From: Al Stone <ahs3_at_fc.hp.com>
Date: 2006-08-23 09:08:01
Simple reformatting to clean up fsys.S and make it comply with
Linux CodingStyle.

Signed-off-by: Al Stone <ahs3@fc.hp.com>


diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 7a05b1c..2bf90a8 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -6,10 +6,11 @@
  *
  * 25-Sep-03 davidm	Implement fsys_rt_sigprocmask().
  * 18-Feb-03 louisk	Implement fsys_gettimeofday().
- * 28-Feb-03 davidm	Fixed several bugs in fsys_gettimeofday().  Tuned it some more,
- *			probably broke it along the way... ;-)
- * 13-Jul-04 clameter   Implement fsys_clock_gettime and revise fsys_gettimeofday to make
- *                      it capable of using memory based clocks without falling back to C code.
+ * 28-Feb-03 davidm	Fixed several bugs in fsys_gettimeofday().  Tuned it
+ *			some more, probably broke it along the way... ;-)
+ * 13-Jul-04 clameter   Implement fsys_clock_gettime and revise
+ *			fsys_gettimeofday to make it capable of using memory
+ *			based clocks without falling back to C code.
  */
 
 #include <asm/asmmacro.h>
@@ -83,31 +84,34 @@ ENTRY(fsys_getppid)
 	;;
 
 	ld4 r9=[r9]
-	add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = &current->group_leader->real_parent
+	// r17 = &current->group_leader->real_parent
+	add r17=IA64_TASK_REAL_PARENT_OFFSET,r17
 	;;
 	and r9=TIF_ALLWORK_MASK,r9
 
-1:	ld8 r18=[r17]				// r18 = current->group_leader->real_parent
+1:	ld8 r18=[r17]		// r18 = current->group_leader->real_parent
 	;;
 	cmp.ne p8,p0=0,r9
-	add r8=IA64_TASK_TGID_OFFSET,r18	// r8 = &current->group_leader->real_parent->tgid
+	// r8 = &current->group_leader->real_parent->tgid
+	add r8=IA64_TASK_TGID_OFFSET,r18
 	;;
 
 	/*
-	 * The .acq is needed to ensure that the read of tgid has returned its data before
-	 * we re-check "real_parent".
+	 * The .acq is needed to ensure that the read of tgid has returned
+	 * its data before we re-check "real_parent".
 	 */
-	ld4.acq r8=[r8]				// r8 = current->group_leader->real_parent->tgid
+	ld4.acq r8=[r8]	    // r8 = current->group_leader->real_parent->tgid
 #ifdef CONFIG_SMP
 	/*
 	 * Re-read current->group_leader->real_parent.
 	 */
-	ld8 r19=[r17]				// r19 = current->group_leader->real_parent
+	ld8 r19=[r17]		// r19 = current->group_leader->real_parent
 (p8)	br.spnt.many fsys_fallback_syscall
 	;;
-	cmp.ne p6,p0=r18,r19			// did real_parent change?
+	cmp.ne p6,p0=r18,r19		// did real_parent change?
 	mov r19=0			// i must not leak kernel bits...
-(p6)	br.cond.spnt.few 1b			// yes -> redo the read of tgid and the check
+(p6)	br.cond.spnt.few 1b		// yes -> redo the read of tgid and
+					//   the check
 	;;
 	mov r17=0			// i must not leak kernel bits...
 	mov r18=0			// i must not leak kernel bits...
@@ -148,8 +152,10 @@ END(fsys_set_tid_address)
 /*
  * Ensure that the time interpolator structure is compatible with the asm code
  */
-#if IA64_TIME_INTERPOLATOR_SOURCE_OFFSET !=0 || IA64_TIME_INTERPOLATOR_SHIFT_OFFSET != 2 \
-	|| IA64_TIME_INTERPOLATOR_JITTER_OFFSET != 3 || IA64_TIME_INTERPOLATOR_NSEC_OFFSET != 4
+#if IA64_TIME_INTERPOLATOR_SOURCE_OFFSET !=0 || \
+    IA64_TIME_INTERPOLATOR_SHIFT_OFFSET != 2 || \
+    IA64_TIME_INTERPOLATOR_JITTER_OFFSET != 3 || \
+    IA64_TIME_INTERPOLATOR_NSEC_OFFSET != 4
 #error fsys_gettimeofday incompatible with changes to struct time_interpolator
 #endif
 #define CLOCK_REALTIME 0
@@ -183,7 +189,8 @@ (p6)    br.cond.spnt.few .fail_einval
 	// r17 = wall to monotonic use
 	// r18 = time_interpolator->offset
 	// r19 = address of wall_to_monotonic
-	// r20 = pointer to struct time_interpolator / pointer to time_interpolator->address
+	// r20 = pointer to struct time_interpolator /
+	//       pointer to time_interpolator->address
 	// r21 = shift factor
 	// r22 = address of time interpolator->last_counter
 	// r23 = address of time_interpolator->last_cycle
@@ -206,10 +213,11 @@ (p6)    br.cond.spnt.few .fail_einval
 	// p14 = Divide by 1000
 	// p15 = Add monotonic
 	//
-	// Note that instructions are optimized for McKinley. McKinley can process two
-	// bundles simultaneously and therefore we continuously try to feed the CPU
-	// two bundles and then a stop.
-	tnat.nz p6,p0 = r31	// branch deferred since it does not fit into bundle structure
+	// Note that instructions are optimized for McKinley. McKinley can
+	// process two bundles simultaneously and therefore we continuously
+	// try to feed the CPU two bundles and then a stop.
+	tnat.nz p6,p0 = r31	// branch deferred since it does not fit into
+				//   bundle structure
 	mov pr = r30,0xc000	// Set predicates according to function
 	add r2 = TI_FLAGS+IA64_TASK_SIZE,r16
 	movl r20 = time_interpolator
@@ -218,7 +226,7 @@ (p6)    br.cond.spnt.few .fail_einval
 	movl r29 = xtime_lock
 	ld4 r2 = [r2]		// process work pending flags
 	movl r27 = xtime
-	;;	// only one bundle here
+	;;			// only one bundle here
 	ld8 r21 = [r20]		// first quad with control information
 	and r2 = TIF_ALLWORK_MASK,r2
 (p6)    br.cond.spnt.few .fail_einval	// deferred branch
@@ -244,13 +252,16 @@ (p12)	ld8 r30 = [r10]
 	;;
 .time_redo:
 	.pred.rel.mutex p8,p9,p10
-	ld4.acq r28 = [r29]	// xtime_lock.sequence. Must come first for locking purposes
+	ld4.acq r28 = [r29]	// xtime_lock.sequence. Must come first for
+				//   locking purposes
 (p8)	mov r2 = ar.itc		// CPU_TIMER. 36 clocks latency!!!
 	add r22 = IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET,r20
-(p9)	ld8 r2 = [r30]		// readq(ti->address). Could also have latency issues..
+(p9)	ld8 r2 = [r30]		// readq(ti->address). Could also have
+				//   latency issues..
 (p10)	ld4 r2 = [r30]		// readw(ti->address)
 (p13)	add r23 = IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET,r20
-	;;			// could be removed by moving the last add upward
+	;;			// could be removed by moving the last add
+				//   upward
 	ld8 r26 = [r22]		// time_interpolator->last_counter
 (p13)	ld8 r25 = [r23]		// time interpolator->last_cycle
 	add r24 = IA64_TIME_INTERPOLATOR_OFFSET_OFFSET,r20
@@ -263,7 +274,7 @@ (p15)	ld8 r17 = [r19],IA64_TIMESPEC_TV_N
 (p13)	sub r3 = r25,r2	// Diff needed before comparison (thanks davidm)
 	;;
 	ld8 r14 = [r14]		// time_interpolator->mask
-(p13)	cmp.gt.unc p6,p7 = r3,r0	// check if it is less than last. p6,p7 cleared
+(p13)	cmp.gt.unc p6,p7 = r3,r0  // check if less than last. p6,p7 cleared
 	sub r10 = r2,r26	// current_counter - last_counter
 	;;
 (p6)	sub r10 = r25,r26	// time we got was less than last_cycle
@@ -275,7 +286,8 @@ (p7)	mov ar.ccv = r25	// more than last_
 	nop.i 123
 	;;
 (p7)	cmpxchg8.rel r3 = [r23],r2,ar.ccv
-EX(.fail_efault, probe.w.fault r31, 3)	// This takes 5 cycles and we have spare time
+EX(.fail_efault, probe.w.fault r31, 3)	// This takes 5 cycles and we have
+					//   spare time
 	xmpy.l f8 = f8,f7	// nsec_per_cyc*(counter-last_counter)
 (p15)	add r9 = r9,r17		// Add wall to monotonic.secs to result secs
 	;;
@@ -290,7 +302,7 @@ (p7)	cmp.ne p7,p0 = r25,r3	// if cmpxchg
 	ld4 r10 = [r29]		// xtime_lock.sequence
 (p15)	add r8 = r8, r17	// Add monotonic.nsecs to nsecs
 	shr.u r2 = r2,r21
-	;;		// overloaded 3 bundles!
+	;;			// overloaded 3 bundles!
 	// End critical section.
 	add r8 = r8,r2		// Add xtime.nsecs
 	cmp4.ne.or p7,p0 = r28,r10
@@ -304,19 +316,21 @@ (p14)	movl r3 = 2361183241434822607	// P
 .time_normalize:
 	mov r21 = r8
 	cmp.ge p6,p0 = r8,r2
-(p14)	shr.u r20 = r8, 3		// We can repeat this if necessary just wasting some time
+(p14)	shr.u r20 = r8, 3		// We can repeat this if necessary,
+					//   just wasting some time
 	;;
 (p14)	setf.sig f8 = r20
 (p6)	sub r8 = r8,r2
 (p6)	add r9 = 1,r9			// two nops before the branch.
-(p14)	setf.sig f7 = r3		// Chances for repeats are 1 in 10000 for gettod
+(p14)	setf.sig f7 = r3		// Chances for repeats are 1 in 10000
+					//   for gettod
 (p6)	br.cond.dpnt.few .time_normalize
 	;;
 	// Divided by 8 though shift. Now divide by 125
 	// The compiler was able to do that with a multiply
 	// and a shift and we do the same
-EX(.fail_efault, probe.w.fault r23, 3)		// This also costs 5 cycles
-(p14)	xmpy.hu f8 = f8, f7			// xmpy has 5 cycles latency so use it...
+EX(.fail_efault, probe.w.fault r23, 3)	// This also costs 5 cycles
+(p14)	xmpy.hu f8 = f8, f7		// xmpy has 5 cycle latency so use it..
 	;;
 	mov r8 = r0
 (p14)	getf.sig r2 = f8
@@ -349,7 +363,8 @@ (p6)	br.spnt.few fsys_fallback_syscall
 END(fsys_clock_gettime)
 
 /*
- * long fsys_rt_sigprocmask (int how, sigset_t *set, sigset_t *oset, size_t sigsetsize).
+ * long fsys_rt_sigprocmask (int how, sigset_t *set, sigset_t *oset,
+ *			     size_t sigsetsize).
  */
 #if _NSIG_WORDS != 1
 # error Sorry, fsys_rt_sigprocmask() needs to be updated for _NSIG_WORDS != 1.
@@ -363,11 +378,11 @@ ENTRY(fsys_rt_sigprocmask)
 	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
 	cmp4.ltu p6,p0=SIG_SETMASK,r32
 
-	cmp.ne p15,p0=r0,r34			// oset != NULL?
+	cmp.ne p15,p0=r0,r34		// oset != NULL?
 	tnat.nz p8,p0=r34
 	add r31=IA64_TASK_SIGHAND_OFFSET,r16
 	;;
-	ld8 r3=[r2]				// read/prefetch current->blocked
+	ld8 r3=[r2]			// read/prefetch current->blocked
 	ld4 r9=[r9]
 	tnat.nz.or p6,p0=r35
 
@@ -383,14 +398,20 @@ #endif
 	;;
 	cmp.ne p7,p0=0,r9
 	cmp.eq p6,p0=r0,r33			// set == NULL?
-	add r31=IA64_SIGHAND_SIGLOCK_OFFSET,r31	// r31 <- current->sighand->siglock
+	// r31 <- current->sighand->siglock
+	add r31=IA64_SIGHAND_SIGLOCK_OFFSET,r31
 (p8)	br.spnt.few .fail_efault		// fail with EFAULT
 (p7)	br.spnt.many fsys_fallback_syscall	// got pending kernel work...
-(p6)	br.dpnt.many .store_mask		// -> short-circuit to just reading the signal mask
+(p6)	br.dpnt.many .store_mask		// -> short-circuit to just
+						//   reading the signal mask
 
-	/* Argh, we actually have to do some work and _update_ the signal mask: */
+	/*
+	 * Argh, we actually have to do some work and _update_ the signal
+	 * mask:
+	 */
 
-EX(.fail_efault, probe.r.fault r33, 3)		// verify user has read-access to *set
+EX(.fail_efault, probe.r.fault r33, 3)		// verify user has read-access
+						//   to *set
 EX(.fail_efault, ld8 r14=[r33])			// r14 <- *set
 	mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1))
 	;;
@@ -403,21 +424,23 @@ #ifdef CONFIG_SMP
 	mov r17=1
 	;;
 	cmpxchg4.acq r18=[r31],r17,ar.ccv	// try to acquire the lock
-	mov r8=EINVAL			// default to EINVAL
+	mov r8=EINVAL				// default to EINVAL
 	;;
-	ld8 r3=[r2]			// re-read current->blocked now that we hold the lock
+	ld8 r3=[r2]				// re-read current->blocked
+						//   now that we hold the lock
 	cmp4.ne p6,p0=r18,r0
 (p6)	br.cond.spnt.many .lock_contention
 	;;
 #else
-	ld8 r3=[r2]			// re-read current->blocked now that we hold the lock
-	mov r8=EINVAL			// default to EINVAL
+	ld8 r3=[r2]				// re-read current->blocked
+						//   now that we hold the lock
+	mov r8=EINVAL				// default to EINVAL
 #endif
 	add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
 	add r19=IA64_TASK_SIGNAL_OFFSET,r16
 	cmp4.eq p6,p0=SIG_BLOCK,r32
 	;;
-	ld8 r19=[r19]			// r19 <- current->signal
+	ld8 r19=[r19]				// r19 <- current->signal
 	cmp4.eq p7,p0=SIG_UNBLOCK,r32
 	cmp4.eq p8,p0=SIG_SETMASK,r32
 	;;
@@ -431,59 +454,64 @@ (p6)	mov r8=0			// clear error code
 	// recalc_sigpending()
 	add r17=IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,r19
 
-	add r19=IA64_SIGNAL_SHARED_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r19
+	add r19=IA64_SIGNAL_SHARED_PENDING_OFFSET + \
+	        IA64_SIGPENDING_SIGNAL_OFFSET,r19
 	;;
 	ld4 r17=[r17]		// r17 <- current->signal->group_stop_count
 (p7)	mov r8=0		// clear error code
 
 	ld8 r19=[r19]		// r19 <- current->signal->shared_pending
 	;;
-	cmp4.gt p6,p7=r17,r0	// p6/p7 <- (current->signal->group_stop_count > 0)?
+	// p6/p7 <- (current->signal->group_stop_count > 0)?
+	cmp4.gt p6,p7=r17,r0
 (p8)	mov r8=0		// clear error code
 
-	or r18=r18,r19		// r18 <- current->pending | current->signal->shared_pending
+	or r18=r18,r19		// r18 <- current->pending |
+				//	  current->signal->shared_pending
 	;;
-	// r18 <- (current->pending | current->signal->shared_pending) & ~current->blocked:
+	// r18 <- (current->pending | current->signal->shared_pending) &
+	//	   ~current->blocked:
 	andcm r18=r18,r14
 	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
 	;;
 
-(p7)	cmp.ne.or.andcm p6,p7=r18,r0		// p6/p7 <- signal pending
-	mov r19=0					// i must not leak kernel bits...
+(p7)	cmp.ne.or.andcm p6,p7=r18,r0	// p6/p7 <- signal pending
+	mov r19=0			// must not leak kernel bits...
 (p6)	br.cond.dpnt.many .sig_pending
 	;;
 
-1:	ld4 r17=[r9]				// r17 <- current->thread_info->flags
+1:	ld4 r17=[r9]			// r17 <- current->thread_info->flags
 	;;
 	mov ar.ccv=r17
-	and r18=~_TIF_SIGPENDING,r17		// r18 <- r17 & ~(1 << TIF_SIGPENDING)
+	and r18=~_TIF_SIGPENDING,r17	// r18 <- r17 & ~(1 << TIF_SIGPENDING)
 	;;
 
-	st8 [r2]=r14				// update current->blocked with new mask
-	cmpxchg4.acq r8=[r9],r18,ar.ccv		// current->thread_info->flags <- r18
+	st8 [r2]=r14			// update current->blocked with new mask
+	cmpxchg4.acq r8=[r9],r18,ar.ccv	  // current->thread_info->flags <- r18
 	;;
-	cmp.ne p6,p0=r17,r8			// update failed?
-(p6)	br.cond.spnt.few 1b			// yes -> retry
+	cmp.ne p6,p0=r17,r8		// update failed?
+(p6)	br.cond.spnt.few 1b		// yes -> retry
 
 #ifdef CONFIG_SMP
-	st4.rel [r31]=r0			// release the lock
+	st4.rel [r31]=r0		// release the lock
 #endif
 	ssm psr.i
 	;;
 
-	srlz.d					// ensure psr.i is set again
-	mov r18=0					// i must not leak kernel bits...
+	srlz.d				// ensure psr.i is set again
+	mov r18=0			// i must not leak kernel bits...
 
 .store_mask:
-EX(.fail_efault, (p15) probe.w.fault r34, 3)	// verify user has write-access to *oset
+EX(.fail_efault, (p15) probe.w.fault r34, 3)	// verify user has write-access
+						//   to *oset
 EX(.fail_efault, (p15) st8 [r34]=r3)
-	mov r2=0					// i must not leak kernel bits...
-	mov r3=0					// i must not leak kernel bits...
+	mov r2=0				// must not leak kernel bits...
+	mov r3=0				// must not leak kernel bits...
 	mov r8=0				// return 0
-	mov r9=0					// i must not leak kernel bits...
-	mov r14=0					// i must not leak kernel bits...
-	mov r17=0					// i must not leak kernel bits...
-	mov r31=0					// i must not leak kernel bits...
+	mov r9=0				// must not leak kernel bits...
+	mov r14=0				// must not leak kernel bits...
+	mov r17=0				// must not leak kernel bits...
+	mov r31=0				// must not leak kernel bits...
 	FSYS_RETURN
 
 .sig_pending:
@@ -493,11 +521,15 @@ #endif
 	ssm psr.i
 	;;
 	srlz.d
-	br.sptk.many fsys_fallback_syscall	// with signal pending, do the heavy-weight syscall
+	br.sptk.many fsys_fallback_syscall	// with signal pending, do the
+						//   heavy-weight syscall
 
 #ifdef CONFIG_SMP
 .lock_contention:
-	/* Rather than spinning here, fall back on doing a heavy-weight syscall.  */
+	/*
+	 * Rather than spinning here, fall back on doing a heavy-weight
+	 * syscall.
+	 */
 	ssm psr.i
 	;;
 	srlz.d
@@ -510,8 +542,9 @@ ENTRY(fsys_fallback_syscall)
 	.altrp b6
 	.body
 	/*
-	 * We only get here from light-weight syscall handlers.  Thus, we already
-	 * know that r15 contains a valid syscall number.  No need to re-check.
+	 * We only get here from light-weight syscall handlers.  Thus, we
+	 * already know that r15 contains a valid syscall number.  No need
+	 * to re-check.
 	 */
 	adds r17=-1024,r15
 	movl r14=sys_call_table
@@ -519,8 +552,10 @@ ENTRY(fsys_fallback_syscall)
 	rsm psr.i
 	shladd r18=r17,3,r14
 	;;
-	ld8 r18=[r18]				// load normal (heavy-weight) syscall entry-point
-	mov r29=psr				// read psr (12 cyc load latency)
+	ld8 r18=[r18]				// load normal (heavy-weight)
+						//   syscall entry-point
+	mov r29=psr				// read psr (12 cycle load
+						//   latency)
 	mov r27=ar.rsc
 	mov r21=ar.fpsr
 	mov r26=ar.pfs
@@ -593,48 +628,61 @@ #	define PSR_ONE_BITS		((3 << IA64_PSR_C
 	movl r28=__kernel_syscall_via_break	// X	create cr.iip
 	;;
 
-	mov r2=r16				// A    get task addr to addl-addressable register
+	mov r2=r16				// A    get task addr to
+						//	addl-addressable
+						//	register
 	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // A
 	mov r31=pr				// I0   save pr (2 cyc)
 	;;
-	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
+	st1 [r16]=r0		// M2|3 clear current->thread.on_ustack flag
 	addl r22=IA64_RBS_OFFSET,r2		// A    compute base of RBS
 	add r3=TI_FLAGS+IA64_TASK_SIZE,r2	// A
 	;;
-	ld4 r3=[r3]				// M0|1 r3 = current_thread_info()->flags
-	lfetch.fault.excl.nt1 [r22]		// M0|1 prefetch register backing-store
+	ld4 r3=[r3]		// M0|1 r3 = current_thread_info()->flags
+	lfetch.fault.excl.nt1 [r22]	// M0|1 prefetch register backing-store
 	nop.i 0
 	;;
-	mov ar.rsc=0				// M2   set enforced lazy mode, pl 0, LE, loadrs=0
+	mov ar.rsc=0				// M2   set enforced lazy mode,
+						//	pl 0, LE, loadrs=0
 	nop.m 0
 	nop.i 0
 	;;
 	mov r23=ar.bspstore			// M2 (12 cyc) save ar.bspstore
-	mov.m r24=ar.rnat			// M2 (5 cyc) read ar.rnat (dual-issues!)
+	mov.m r24=ar.rnat			// M2 (5 cyc) read ar.rnat
+						//    (dual-issues!)
 	nop.i 0
 	;;
-	mov ar.bspstore=r22			// M2 (6 cyc) switch to kernel RBS
+	mov ar.bspstore=r22			// M2 (6 cyc) switch to kernel
+						//    RBS
 	movl r8=PSR_ONE_BITS			// X
 	;;
 	mov r25=ar.unat				// M2 (5 cyc) save ar.unat
 	mov r19=b6				// I0   save b6 (2 cyc)
 	mov r20=r1				// A    save caller's gp in r20
 	;;
-	or r29=r8,r29				// A    construct cr.ipsr value to save
-	mov b6=r18				// I0   copy syscall entry-point to b6 (7 cyc)
-	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 // A compute base of memory stack
-
-	mov r18=ar.bsp				// M2   save (kernel) ar.bsp (12 cyc)
-	cmp.ne pKStk,pUStk=r0,r0		// A    set pKStk <- 0, pUStk <- 1
+	or r29=r8,r29				// A    construct cr.ipsr value
+						//	to save
+	mov b6=r18				// I0   copy syscall entry
+						//	point to b6 (7 cyc)
+	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 // A compute base of
+						     //   memory stack
+
+	mov r18=ar.bsp				// M2   save (kernel) ar.bsp
+						//	(12 cyc)
+	cmp.ne pKStk,pUStk=r0,r0		// A    set pKStk <- 0,
+						//	pUStk <- 1
 	br.call.sptk.many b7=ia64_syscall_setup	// B
 	;;
-	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
-	mov rp=r14				// I0   set the real return addr
+	mov ar.rsc=0x3				// M2   set eager mode, pl 0, 
+						//	LE, loadrs=0
+	mov rp=r14				// I0   set real return address
 	and r3=_TIF_SYSCALL_TRACEAUDIT,r3	// A
 	;;
-	ssm psr.i				// M2   we're on kernel stacks now, reenable irqs
+	ssm psr.i				// M2   we're on kernel stacks
+						//	now, reenable irqs
 	cmp.eq p8,p0=r3,r0			// A
-(p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT
+(p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call
+						//	frame or r15 is a NaT
 
 	nop.m 0
 (p8)	br.call.sptk.many b6=b6			// B    (ignore return address)


-- 
Ciao,
al
----------------------------------------------------------------------
Al Stone                                      Alter Ego:
Open Source and Linux R&D                     Debian Developer
Hewlett-Packard Company                       http://www.debian.org
E-mail: ahs3@fc.hp.com                        ahs3@debian.org
----------------------------------------------------------------------

-
To unsubscribe from this list: send the line "unsubscribe linux-ia64" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Received on Thu Aug 24 00:25:44 2006

This archive was generated by hypermail 2.1.8 : 2006-08-24 00:25:55 EST