diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2551c0884afcd178d957ce02304ccef119e5870d..148a3547c9aabbb04dc29a8f74a8a0cfd46bdf06 100644
@@ -172,13 +172,18 @@ syscall_error_cont:
        stdcx.  r0,0,r1                 /* to clear the reservation */
        andi.   r6,r8,MSR_PR
        ld      r4,_LINK(r1)
+       /*
+        * Clear RI before restoring r13.  If we are returning to
+        * userspace and we take an exception after restoring r13,
+        * we end up corrupting the userspace r13 value.
+        */
+       li      r12,MSR_RI
+       andc    r11,r10,r12
+       mtmsrd  r11,1                   /* clear MSR.RI */
        beq-    1f
        ACCOUNT_CPU_USER_EXIT(r11, r12)
        ld      r13,GPR13(r1)   /* only restore r13 if returning to usermode */
 1:     ld      r2,GPR2(r1)
-       li      r12,MSR_RI
-       andc    r11,r10,r12
-       mtmsrd  r11,1                   /* clear MSR.RI */
        ld      r1,GPR1(r1)
        mtlr    r4
        mtcr    r5
@@ -367,9 +372,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        std     r6,PACACURRENT(r13)     /* Set new 'current' */
 
        ld      r8,KSP(r4)      /* new stack pointer */
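+       /* CPUs with no SLB (CPU_FTR_SLB clear) skip the SLB update */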
+BEGIN_FTR_SECTION
+       b       2f
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 BEGIN_FTR_SECTION
        clrrdi  r6,r8,28        /* get its ESID */
        clrrdi  r9,r1,28        /* get current sp ESID */
+END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+BEGIN_FTR_SECTION
+       clrrdi  r6,r8,40        /* get its 1T ESID */
+       clrrdi  r9,r1,40        /* get current sp 1T ESID */
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
        clrldi. r0,r6,2         /* is new ESID c00000000? */
        cmpd    cr1,r6,r9       /* or is new ESID the same as current ESID? */
        cror    eq,4*cr1+eq,eq
@@ -379,21 +392,34 @@ BEGIN_FTR_SECTION
        ld      r7,KSP_VSID(r4) /* Get new stack's VSID */
        oris    r0,r6,(SLB_ESID_V)@h
        ori     r0,r0,(SLB_NUM_BOLTED-1)@l
-
-       /* Update the last bolted SLB */
+BEGIN_FTR_SECTION
+       li      r9,MMU_SEGSIZE_1T       /* insert B field */
+       oris    r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
+       rldimi  r7,r9,SLB_VSID_SSIZE_SHIFT,0
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+
+       /* Update the last bolted SLB.  No write barriers are needed
+        * here, provided we only update the current CPU's SLB shadow
+        * buffer.
+        */
        ld      r9,PACA_SLBSHADOWPTR(r13)
        li      r12,0
        std     r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
        std     r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
        std     r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */
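+       /* (the ESID is cleared first and rewritten last, so the shadow
+        * entry is never valid while it is only half updated) */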
 
+       /* No need to check for CPU_FTR_NO_SLBIE_B here: when 1TB
+        * segments are in use, the only CPUs known to have the erratum
+        * support less than 1TB of system memory, so we never actually
+        * hit this code path.
+        */
+
        slbie   r6
        slbie   r6              /* Workaround POWER5 < DD2.1 issue */
        slbmte  r7,r0
        isync
 
 2:
-END_FTR_SECTION_IFSET(CPU_FTR_SLB)
        clrrdi  r7,r8,THREAD_SHIFT      /* base of new stack */
        /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
           because we don't need to leave the 288-byte ABI gap at the
@@ -488,42 +514,44 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
        stb     r5,PACASOFTIRQEN(r13)
 
+       /* extract EE bit and use it to restore paca->hard_enabled */
        ld      r3,_MSR(r1)
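+       /* MSR_EE is bit 15, so rotating left by 49 (= 64 - 15) brings
+        * it down to bit 0; the mask then keeps only that bit.
+        */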
+       rldicl  r4,r3,49,63             /* r4 = (r3 >> 15) & 1 */
+       stb     r4,PACAHARDIRQEN(r13)
+
+       ld      r4,_CTR(r1)
+       ld      r0,_LINK(r1)
+       mtctr   r4
+       mtlr    r0
+       ld      r4,_XER(r1)
+       mtspr   SPRN_XER,r4
+
+       REST_8GPRS(5, r1)
+
        andi.   r0,r3,MSR_RI
        beq-    unrecov_restore
 
-       /* extract EE bit and use it to restore paca->hard_enabled */
-       rldicl  r4,r3,49,63             /* r0 = (r3 >> 15) & 1 */
-       stb     r4,PACAHARDIRQEN(r13)
+       stdcx.  r0,0,r1         /* to clear the reservation */
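+       /* (so that a st[dw]cx. in the interrupted context cannot
+        * falsely succeed on a stale reservation after we return) */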
 
-       andi.   r0,r3,MSR_PR
+       /*
+        * Clear RI before restoring r13.  If we are returning to
+        * userspace and we take an exception after restoring r13,
+        * we end up corrupting the userspace r13 value.
+        */
+       mfmsr   r4
+       andc    r4,r4,r0        /* r0 contains MSR_RI here */
+       mtmsrd  r4,1
 
        /*
         * r13 is our per-cpu area; only restore it if we are returning to
         * userspace
         */
+       andi.   r0,r3,MSR_PR
        beq     1f
-       ACCOUNT_CPU_USER_EXIT(r3, r4)
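+       /* r3 still holds the saved MSR, which goes to SRR1 below, so
+        * use r2 as a scratch register instead; r2 is reloaded from
+        * the stack frame before the final rfid.
+        */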
+       ACCOUNT_CPU_USER_EXIT(r2, r4)
        REST_GPR(13, r1)
 1:
-       ld      r3,_CTR(r1)
-       ld      r0,_LINK(r1)
-       mtctr   r3
-       mtlr    r0
-       ld      r3,_XER(r1)
-       mtspr   SPRN_XER,r3
-
-       REST_8GPRS(5, r1)
-
-       stdcx.  r0,0,r1         /* to clear the reservation */
-
-       mfmsr   r0
-       li      r2, MSR_RI
-       andc    r0,r0,r2
-       mtmsrd  r0,1
-
-       ld      r0,_MSR(r1)
-       mtspr   SPRN_SRR1,r0
+       mtspr   SPRN_SRR1,r3
 
        ld      r2,_CCR(r1)
        mtcrf   0xFF,r2
@@ -539,7 +567,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
        rfid
        b       .       /* prevent speculative execution */
 
-/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
 do_work:
 #ifdef CONFIG_PREEMPT
        andi.   r0,r3,MSR_PR    /* Returning to user mode? */