Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch
[sfrench/cifs-2.6.git] / arch / sparc / kernel / unaligned_64.c
1 /*
2  * unaligned.c: Unaligned load/store trap handling with special
3  *              cases for the kernel to do them more quickly.
4  *
5  * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
6  * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7  */
8
9
10 #include <linux/jiffies.h>
11 #include <linux/kernel.h>
12 #include <linux/sched.h>
13 #include <linux/mm.h>
14 #include <linux/module.h>
15 #include <asm/asi.h>
16 #include <asm/ptrace.h>
17 #include <asm/pstate.h>
18 #include <asm/processor.h>
19 #include <asm/uaccess.h>
20 #include <linux/smp.h>
21 #include <linux/bitops.h>
22 #include <linux/perf_event.h>
23 #include <linux/ratelimit.h>
24 #include <asm/fpumacro.h>
25 #include <asm/cacheflush.h>
26
27 enum direction {
28         load,    /* ld, ldd, ldh, ldsh */
29         store,   /* st, std, sth, stsh */
30         both,    /* Swap, ldstub, cas, ... */
31         fpld,
32         fpst,
33         invalid,
34 };
35
36 static inline enum direction decode_direction(unsigned int insn)
37 {
38         unsigned long tmp = (insn >> 21) & 1;
39
40         if (!tmp)
41                 return load;
42         else {
43                 switch ((insn>>19)&0xf) {
44                 case 15: /* swap* */
45                         return both;
46                 default:
47                         return store;
48                 }
49         }
50 }
51
/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
	unsigned int op3 = (insn >> 19) & 0xf;

	if (op3 == 11 || op3 == 14) /* ldx/stx */
		return 8;

	switch (op3 & 3) {
	case 0:
		return 4;
	case 3:
		return 16;	/* ldd/std - Although it is actually 8 */
	case 2:
		return 2;
	default:
		printk("Impossible unaligned trap. insn=%08x\n", insn);
		die_if_kernel("Byte sized unaligned access?!?!", regs);

		/* die_if_kernel() is marked noreturn, yet some gcc
		 * versions still warn about falling off the end of
		 * the function without this return.
		 */
		return 0;
	}
}
80
81 static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
82 {
83         if (insn & 0x800000) {
84                 if (insn & 0x2000)
85                         return (unsigned char)(regs->tstate >> 24);     /* %asi */
86                 else
87                         return (unsigned char)(insn >> 5);              /* imm_asi */
88         } else
89                 return ASI_P;
90 }
91
/* Non-zero (bit 22 of the instruction) when the load sign-extends,
 * zero for an unsigned access.
 */
static inline int decode_signedness(unsigned int insn)
{
	return insn & 0x400000;
}
97
/* If any register involved is a windowed one (%l/%i, number >= 16),
 * the register windows must be spilled to the stack before their
 * contents can be read or written through memory.
 */
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd, int from_kernel)
{
	if (rs1 < 16 && rs2 < 16 && rd < 16)
		return;

	if (from_kernel)
		__asm__ __volatile__("flushw");
	else
		flushw_user();
}
108
/* Sign-extend the 13-bit immediate field of an instruction word.
 *
 * The previous "imm << 51 >> 51" relied on undefined behaviour
 * (left-shifting a signed long into/through the sign bit) plus
 * implementation-defined arithmetic right shift of negative values.
 * Mask to 13 bits and sign-extend with the portable xor/subtract
 * idiom instead; the result is identical for every input under the
 * semantics the old code assumed.
 */
static inline long sign_extend_imm13(long imm)
{
	return ((imm & 0x1fff) ^ 0x1000) - 0x1000;
}
113
/* Read integer register %reg for the trapping context.  Globals and
 * outs (reg < 16) come straight from pt_regs; locals and ins
 * (reg >= 16) must be read from the register window save area on the
 * stack, whose layout depends on privilege level and on whether the
 * task uses a 32-bit stack.
 */
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value, fp;
	
	/* %g0 always reads as zero. */
	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel: window lives at %fp + STACK_BIAS and is
		 * directly addressable.
		 */
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		/* 32-bit user stack: 32-bit window format, no stack
		 * bias, and %fp truncated to 32 bits.
		 */
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		/* 64-bit user stack: full-width window at %fp + STACK_BIAS. */
		struct reg_window __user *win;
		win = (struct reg_window __user *)(fp + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}
138
/* Return the address where integer register %reg of the trapping
 * context is saved.  For reg < 16 this points into pt_regs (note the
 * %g0 slot is returned as-is; callers are expected to discard writes
 * to %g0 themselves).  For windowed registers it points into the
 * register window save area on the stack; for user tasks that is a
 * userspace address and must be accessed via get_user/put_user.
 */
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	unsigned long fp;

	if (reg < 16)
		return &regs->u_regs[reg];

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel window: directly addressable at %fp + STACK_BIAS. */
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		/* 32-bit user stack: 32-bit window at the truncated
		 * %fp, no stack bias (slots are 32-bit wide).
		 */
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		/* 64-bit user stack: full-width window at %fp + STACK_BIAS. */
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}
162
163 unsigned long compute_effective_address(struct pt_regs *regs,
164                                         unsigned int insn, unsigned int rd)
165 {
166         unsigned int rs1 = (insn >> 14) & 0x1f;
167         unsigned int rs2 = insn & 0x1f;
168         int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
169
170         if (insn & 0x2000) {
171                 maybe_flush_windows(rs1, 0, rd, from_kernel);
172                 return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
173         } else {
174                 maybe_flush_windows(rs1, rs2, rd, from_kernel);
175                 return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
176         }
177 }
178
/* This is just to make gcc think die_if_kernel does return...
 * Wrapper used by kernel_unaligned_trap() for accesses the kernel
 * refuses to emulate.
 */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}
184
185 extern int do_int_load(unsigned long *dest_reg, int size,
186                        unsigned long *saddr, int is_signed, int asi);
187         
188 extern int __do_int_store(unsigned long *dst_addr, int size,
189                           unsigned long src_val, int asi);
190
/* Emulate an integer store of `size` bytes from register %reg_num to
 * dst_addr via the assembly helper __do_int_store.  Returns the
 * helper's result (non-zero on fault).
 */
static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
			       struct pt_regs *regs, int asi, int orig_asi)
{
	unsigned long zero = 0;
	unsigned long *src_val_p = &zero;
	unsigned long src_val;

	if (size == 16) {
		/* std: combine the even/odd register pair into a single
		 * 64-bit value and perform one 8-byte store.  %g0 as the
		 * even register contributes zero.
		 */
		size = 8;
		zero = (((long)(reg_num ?
			(unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
			(unsigned)fetch_reg(reg_num + 1, regs);
	} else if (reg_num) {
		src_val_p = fetch_reg_addr(reg_num, regs);
	}
	/* %g0 (reg_num == 0) keeps src_val_p pointing at `zero`. */
	src_val = *src_val_p;
	if (unlikely(asi != orig_asi)) {
		/* The caller stripped the little-endian bit from the
		 * ASI, so byte-swap the value before the big-endian
		 * store to preserve the original semantics.
		 */
		switch (size) {
		case 2:
			src_val = swab16(src_val);
			break;
		case 4:
			src_val = swab32(src_val);
			break;
		case 8:
			src_val = swab64(src_val);
			break;
		case 16:
		default:
			/* size 16 was reduced to 8 above, so this is
			 * unreachable.
			 */
			BUG();
			break;
		}
	}
	return __do_int_store(dst_addr, size, src_val, asi);
}
226
227 static inline void advance(struct pt_regs *regs)
228 {
229         regs->tpc   = regs->tnpc;
230         regs->tnpc += 4;
231         if (test_thread_flag(TIF_32BIT)) {
232                 regs->tpc &= 0xffffffff;
233                 regs->tnpc &= 0xffffffff;
234         }
235 }
236
/* Bit 24 of the instruction distinguishes FP loads/stores from
 * integer ones.
 */
static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn & (1u << 24)) != 0;
}
241
/* The kernel path can only fix up integer loads/stores; anything
 * with the FP bit (24) set is rejected.
 */
static inline int ok_for_kernel(unsigned int insn)
{
	return !((insn >> 24) & 1);
}
246
/* Called when emulating a kernel unaligned access itself faults.
 * If the trapping PC has an exception table entry (e.g. it was a
 * {get,put}_user style access), resume at the fixup handler;
 * otherwise print a page-fault style report and die.
 *
 * @fixup_tstate_asi: when set, force the %asi field of TSTATE back
 * to ASI_AIUS after applying the fixup.
 */
static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
	struct pt_regs *regs = current_thread_info()->kern_una_regs;
	unsigned int insn = current_thread_info()->kern_una_insn;
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (!entry) {
		unsigned long address;

		address = compute_effective_address(regs, insn,
						    ((insn >> 25) & 0x1f));
		/* No fixup registered: unrecoverable kernel fault. */
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n",address);
		printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
			(current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
			(current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	/* Resume execution at the fixup stub. */
	regs->tpc = entry->fixup;
	regs->tnpc = regs->tpc + 4;

	if (fixup_tstate_asi) {
		/* Restore the secondary-user ASI in TSTATE (bits 31:24). */
		regs->tstate &= ~TSTATE_ASI;
		regs->tstate |= (ASI_AIUS << 24UL);
	}
}
283
/* Rate-limited report of a kernel unaligned access (at most 5
 * messages per 5 second window) so a hot trap site cannot flood
 * the log.
 */
static void log_unaligned(struct pt_regs *regs)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	if (__ratelimit(&ratelimit)) {
		printk("Kernel unaligned access at TPC[%lx] %pS\n",
		       regs->tpc, (void *) regs->tpc);
	}
}
293
/* Entry point for kernel-mode unaligned access traps.  Decodes the
 * faulting instruction, emulates the integer load or store via
 * do_int_load()/do_int_store(), and steps past it.  Faults raised
 * during the emulation are routed to kernel_mna_trap_fault().
 */
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(regs, insn);
	int orig_asi, asi;

	/* Stash trap state where kernel_mna_trap_fault() can find it. */
	current_thread_info()->kern_una_regs = regs;
	current_thread_info()->kern_una_insn = insn;

	orig_asi = asi = decode_asi(insn, regs);

	/* If this is a {get,put}_user() on an unaligned userspace pointer,
	 * just signal a fault and do not log the event.
	 */
	if (asi == ASI_AIUS) {
		kernel_mna_trap_fault(0);
		return;
	}

	log_unaligned(regs);

	if (!ok_for_kernel(insn) || dir == both) {
		/* FP accesses and atomics (swap/ldstub/cas) cannot be
		 * emulated here.
		 */
		printk("Unsupported unaligned load/store trap for kernel "
		       "at <%016lx>.\n", regs->tpc);
		unaligned_panic("Kernel does fpu/atomic "
				"unaligned load/store.", regs);

		kernel_mna_trap_fault(0);
	} else {
		unsigned long addr, *reg_addr;
		int err;

		addr = compute_effective_address(regs, insn,
						 ((insn >> 25) & 0x1f));
		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
		/* Little-endian ASI variants (bit 0x08 set): perform
		 * the access with the big-endian ASI and byte-swap the
		 * value afterwards.
		 */
		switch (asi) {
		case ASI_NL:
		case ASI_AIUPL:
		case ASI_AIUSL:
		case ASI_PL:
		case ASI_SL:
		case ASI_PNFL:
		case ASI_SNFL:
			asi &= ~0x08;
			break;
		}
		switch (dir) {
		case load:
			reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
			err = do_int_load(reg_addr, size,
					  (unsigned long *) addr,
					  decode_signedness(insn), asi);
			if (likely(!err) && unlikely(asi != orig_asi)) {
				/* Undo the endian stripping done above. */
				unsigned long val_in = *reg_addr;
				switch (size) {
				case 2:
					val_in = swab16(val_in);
					break;
				case 4:
					val_in = swab32(val_in);
					break;
				case 8:
					val_in = swab64(val_in);
					break;
				case 16:
				default:
					/* decode_access_size() only
					 * returns 16 for ldd/std, which
					 * do_int_load handles as 8.
					 */
					BUG();
					break;
				}
				*reg_addr = val_in;
			}
			break;

		case store:
			err = do_int_store(((insn>>25)&0x1f), size,
					   (unsigned long *) addr, regs,
					   asi, orig_asi);
			break;

		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		if (unlikely(err))
			kernel_mna_trap_fault(1);
		else
			advance(regs);
	}
}
383
/* Emulate the POPC (population count) instruction: count the set
 * bits of the source operand with hweight64() and write the result
 * to %rd, honouring register windows.  Always returns 1 (handled).
 */
int handle_popc(u32 insn, struct pt_regs *regs)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	int ret, rd = ((insn >> 25) & 0x1f);
	u64 value;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
	if (insn & 0x2000) {
		/* Immediate form: operand is the sign-extended simm13. */
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		/* Register form: operand comes from %rs2. */
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	ret = hweight64(value);
	if (rd < 16) {
		/* Globals/outs live in pt_regs; writes to %g0 are dropped. */
		if (rd)
			regs->u_regs[rd] = ret;
	} else {
		/* Windowed destination: write into the user's register
		 * window save area, whose layout depends on whether the
		 * task runs with a 32-bit stack.
		 */
		unsigned long fp = regs->u_regs[UREG_FP];

		if (!test_thread_64bit_stack(fp)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(fp + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}
418
419 extern void do_fpother(struct pt_regs *regs);
420 extern void do_privact(struct pt_regs *regs);
421 extern void spitfire_data_access_exception(struct pt_regs *regs,
422                                            unsigned long sfsr,
423                                            unsigned long sfar);
424 extern void sun4v_data_access_exception(struct pt_regs *regs,
425                                         unsigned long addr,
426                                         unsigned long type_ctx);
427
/* Emulate floating-point alternate-space loads/stores (LDF/LDDF/LDQF
 * and STQ) that trapped.  Returns 1 when the trap has been handled
 * (including by raising a data access exception), 0 when an fp-other
 * trap was raised instead (bad quad register number).
 */
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	/* FP register number: bit 5 of the encoding selects the upper half. */
	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	/* Which half of the FP register file this access dirties. */
	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	save_and_clear_fpu();
	current_thread_info()->xfsr[0] &= ~0x1c000;
	if (freg & 3) {
		/* Quad registers must be 4-aligned; report via FSR and
		 * an fp-other trap.
		 */
		current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
		do_fpother(regs);
		return 0;
	}
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;
		
		/* Only read the source pair if that half of the FP file
		 * was actually saved; otherwise zeros are stored.
		 */
		if (current_thread_info()->fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			/* Restricted ASI used from user mode. */
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL: 
			{
				/* Need to convert endians */
				u64 tmp = __swab64p(&first);
				
				first = __swab64p(&second);
				second = tmp;
				break;
			}
		default:
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		/* Write the 16 bytes as four 32-bit user stores. */
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		/* Access width in 32-bit words: LDF=1, LDQF=4, LDDF=2. */
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		for (i = 0; i < size; i++)
			data[i] = 0;
		
		err = get_user (data[0], (u32 __user *) addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
		}
		/* Non-faulting ASIs (bit 0x2) swallow the fault and keep
		 * the zeros loaded above.
		 */
		if (err && !(asi & 0x2 /* NF */)) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			/* Convert from little-endian memory order, also
			 * swapping the two 64-bit halves for quads.
			 */
			switch (size) {
			case 1: data[0] = le32_to_cpup(data + 0); break;
			default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			}
		}
		/* Lazily initialize the saved FP state, zeroing the half
		 * we are about to partially write, then merge in the
		 * loaded words.
		 */
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current_thread_info()->fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}
552
/* Emulate a non-faulting load that faulted: load zero into the
 * destination register (and the odd register of the pair for the
 * double-word form, op3 == 0x3) and continue.
 */
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;
	                        
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		/* Directly addressable (pt_regs or kernel stack window). */
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
		/* User window on a 32-bit stack: 32-bit slots. */
		put_user(0, (int __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int __user *) reg) + 1);
	} else {
		/* User window on a 64-bit stack: 64-bit slots. */
		put_user(0, (unsigned long __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, (unsigned long __user *) reg + 1);
	}
	advance(regs);
}
578
/* Emulate an unaligned LDDF (load double FP) trap from user mode by
 * assembling the 64-bit value from two 32-bit user loads at sfar and
 * writing it into the destination FP register.
 */
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	/* Re-fetch the faulting instruction from the user's PC. */
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		u32 first, second;
		int err;

		/* Only the normal primary/secondary ASIs (including
		 * little-endian and non-faulting variants) are handled.
		 */
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		first = second = 0;
		err = get_user(first, (u32 __user *)sfar);
		if (!err)
			err = get_user(second, (u32 __user *)(sfar + 4));
		if (err) {
			/* Non-faulting ASIs (bit 0x2) load zero on fault. */
			if (!(asi & 0x2))
				goto daex;
			first = second = 0;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		/* Lazily initialize FP state before writing the register. */
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current_thread_info()->fpsaved[0] |= flag;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
}
639
/* Emulate an unaligned STDF (store double FP) trap from user mode by
 * splitting the 64-bit FP register into two 32-bit user stores at
 * sfar.
 */
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	/* Re-fetch the faulting instruction from the user's PC. */
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		/* Only the normal primary/secondary ASI range is handled. */
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		/* Source value is zero unless this half of the FP file
		 * has saved state.
		 */
		if (current_thread_info()->fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL: 
			/* Little-endian ASI: byte-swap before storing. */
			value = __swab64p(&value); break;
		default: goto daex;
		}
		if (put_user (value >> 32, (u32 __user *) sfar) ||
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
			goto daex;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
}