Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 8 Jan 2010 17:32:15 +0000 (09:32 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 8 Jan 2010 17:32:15 +0000 (09:32 -0800)
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] __per_cpu_idtrs[] is a memory hog
  [IA64] sanity in #include files.  Move fnptr to types.h
  [IA64] use helpers for rlimits
  [IA64] cpumask_of_node() should handle -1 as a node

arch/ia64/include/asm/ftrace.h
arch/ia64/include/asm/kprobes.h
arch/ia64/include/asm/tlb.h
arch/ia64/include/asm/topology.h
arch/ia64/include/asm/types.h
arch/ia64/kernel/mca.c
arch/ia64/kernel/perfmon.c
arch/ia64/mm/init.c
arch/ia64/mm/tlb.c

diff --git a/arch/ia64/include/asm/ftrace.h b/arch/ia64/include/asm/ftrace.h
index d20db3c2a656719cc44a3ee3a9559737234c7a4b..fbd1a2470cae0cc9152621a3765814cbd53c2858 100644
--- a/arch/ia64/include/asm/ftrace.h
+++ b/arch/ia64/include/asm/ftrace.h
@@ -8,7 +8,6 @@
 extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
 #define mcount _mcount
 
-#include <asm/kprobes.h>
 /* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
 #define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
 #define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
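
The dropped #include was only needed for struct fnptr, which this series moves to asm/types.h (see below). For readers unfamiliar with the ia64 ABI: a function symbol names a descriptor, not code, which is why MCOUNT_ADDR is set at link time rather than being a compile-time constant. A minimal sketch of the idea, where mcount_text_addr() is a hypothetical helper, not kernel code:

    struct fnptr {
            unsigned long ip;       /* entry address of the function's code */
            unsigned long gp;       /* global pointer the callee expects */
    };

    extern void _mcount(unsigned long pfs, unsigned long r1,
                        unsigned long b0, unsigned long r0);

    static unsigned long mcount_text_addr(void)
    {
            /* A function name decays to a pointer to its descriptor, so
             * the cast exposes the real code address, exactly as the
             * MCOUNT_ADDR macro above does. */
            return ((struct fnptr *)_mcount)->ip;
    }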
diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h
index dbf83fb28db318849f3a148d215e4977d6fd1760..d5505d6f2382bd00d298639595730000b9347021 100644
--- a/arch/ia64/include/asm/kprobes.h
+++ b/arch/ia64/include/asm/kprobes.h
@@ -103,11 +103,6 @@ typedef struct kprobe_opcode {
        bundle_t bundle;
 } kprobe_opcode_t;
 
-struct fnptr {
-       unsigned long ip;
-       unsigned long gp;
-};
-
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
        /* copy of the instruction to be emulated */
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 85d965cb19a0835ace573551d667053fcc8fa260..23cce999eb1cadaef129afa9d342ca3bc87c2d54 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -74,7 +74,7 @@ struct ia64_tr_entry {
 extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
 extern void ia64_ptr_entry(u64 target_mask, int slot);
 
-extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  region register macros
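
Why the old array was a memory hog: struct ia64_tr_entry is four u64s (ifa, itir, pte, rr; see the accesses in mca.c and tlb.c below), i.e. 32 bytes. A back-of-envelope estimate, assuming IA64_TR_ALLOC_MAX == 64 and a distro config with NR_CPUS == 4096 (both figures are assumptions; check the headers and your .config):

    /*
     * old: NR_CPUS * 2 halves * IA64_TR_ALLOC_MAX * sizeof(struct ia64_tr_entry)
     *    = 4096 * 2 * 64 * 32 bytes = 16 MB, statically allocated even on
     *      a machine with a handful of CPUs
     * new: 4096 * sizeof(void *) = 32 KB of pointers, plus 2 * 64 * 32
     *    = 4 KB per CPU, kmalloc'ed only when a TR is first registered
     */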
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 3ddb4e709dbadb1c36523e696a6ce32927c1c459..d323071d0f915e95ad60d6c45974a4e9788aedae 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,9 @@
 /*
  * Returns a bitmask of CPUs on Node 'node'.
  */
-#define cpumask_of_node(node) (&node_to_cpu_mask[node])
+#define cpumask_of_node(node) ((node) == -1 ?                          \
+                              cpu_all_mask :                           \
+                              &node_to_cpu_mask[node])
 
 /*
  * Returns the number of the node containing Node 'nid'.
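
Callers commonly feed cpumask_of_node() the result of dev_to_node(), which is -1 for a device with no NUMA affinity; previously that indexed node_to_cpu_mask[-1]. A hedged usage sketch (cpus_for_dev() is hypothetical, not kernel code):

    #include <linux/device.h>
    #include <linux/topology.h>

    static const struct cpumask *cpus_for_dev(struct device *dev)
    {
            /* dev_to_node() may return -1; with this patch the macro
             * degrades gracefully to "all CPUs" instead of reading
             * before the start of node_to_cpu_mask[]. */
            return cpumask_of_node(dev_to_node(dev));
    }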
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
index bcd260e597de32aa0a99e637cc0e01338ea721ae..b8e5d97be158a5b03797479b1ea6c202a38556c8 100644
--- a/arch/ia64/include/asm/types.h
+++ b/arch/ia64/include/asm/types.h
 
 typedef unsigned int umode_t;
 
+struct fnptr {
+       unsigned long ip;
+       unsigned long gp;
+};
+
 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
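
This is the destination of the struct fnptr definition removed from asm/kprobes.h above: asm/types.h carries no further dependencies, so asm/ftrace.h (and anything else that only needs the descriptor layout) no longer has to drag in the whole kprobes header.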
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 32f2639e9b0af540948a85ceb9878d33efd866c8..378b4833024f110ac7e11265341774f3a6ee75be 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1225,9 +1225,12 @@ static void mca_insert_tr(u64 iord)
        unsigned long psr;
        int cpu = smp_processor_id();
 
+       if (!ia64_idtrs[cpu])
+               return;
+
        psr = ia64_clear_ic();
        for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
-               p = &__per_cpu_idtrs[cpu][iord-1][i];
+               p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
                if (p->pte & 0x1) {
                        old_rr = ia64_get_rr(p->ifa);
                        if (old_rr != p->rr) {
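
Since ia64_idtrs[cpu] is now allocated on first use in ia64_itr_entry() (see arch/ia64/mm/tlb.c below), the MCA recovery path must tolerate a CPU that never registered a translation; the new NULL check simply skips re-inserting TRs there. Note that the loop body, as shown, derives p only from the selected half ((iord - 1) * IA64_TR_ALLOC_MAX) and does not add the slot index i.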
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 5246285a95fb253187c774093743df35b76239aa..6bcbe215b9a418e555cad4793e20a6723d7b88f9 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2293,7 +2293,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
         * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
         *      return -ENOMEM;
         */
-       if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
+       if (size > task_rlimit(task, RLIMIT_MEMLOCK))
                return -ENOMEM;
 
        /*
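
task_rlimit() comes from the generic "use helpers for rlimits" work this patch builds on; it hides the signal-struct plumbing behind one call. Roughly how the helpers read (paraphrased from memory, not the authoritative definitions in <linux/sched.h>):

    static inline unsigned long task_rlimit(const struct task_struct *tsk,
                                            unsigned int limit)
    {
            return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
    }

    /* rlimit_max(), used in mm/init.c below, reads rlim_max for current: */
    static inline unsigned long rlimit_max(unsigned int limit)
    {
            return ACCESS_ONCE(current->signal->rlim[limit].rlim_max);
    }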
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index b9609c69343a373f5e742be333bcac7d24cf4381..7c0d4814a68dc12a2f5f2dca9b16f1631a7f8314 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -91,7 +91,7 @@ dma_mark_clean(void *addr, size_t size)
 inline void
 ia64_set_rbs_bot (void)
 {
-       unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
+       unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
 
        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
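
A side note on the untouched `& -16`: -16 sign-extends to ~15UL, so the AND rounds the stack limit down to a 16-byte boundary (the stack alignment the ia64 ABI requires), e.g. 0x801237 & -16 == 0x801230.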
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ee09d261f2e6c0be4071e1de30aee0339d6b9706..f3de9d7a98b481c632fa4960211213f0a3ee307d 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -48,7 +48,7 @@ DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
 DEFINE_PER_CPU(u8, ia64_tr_num);  /*Number of TR slots in current processor*/
 DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/
 
-struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -429,10 +429,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
        struct ia64_tr_entry *p;
        int cpu = smp_processor_id();
 
+       if (!ia64_idtrs[cpu]) {
+               ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
+                               sizeof (struct ia64_tr_entry), GFP_KERNEL);
+               if (!ia64_idtrs[cpu])
+                       return -ENOMEM;
+       }
        r = -EINVAL;
        /*Check overlap with existing TR entries*/
        if (target_mask & 0x1) {
-               p = &__per_cpu_idtrs[cpu][0][0];
+               p = ia64_idtrs[cpu];
                for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
                                                                i++, p++) {
                        if (p->pte & 0x1)
@@ -444,7 +450,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
                }
        }
        if (target_mask & 0x2) {
-               p = &__per_cpu_idtrs[cpu][1][0];
+               p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
                for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
                                                                i++, p++) {
                        if (p->pte & 0x1)
@@ -459,16 +465,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
        for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
                switch (target_mask & 0x3) {
                case 1:
-                       if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+                       if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
                                goto found;
                        continue;
                case 2:
-                       if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+                       if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
                                goto found;
                        continue;
                case 3:
-                       if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
-                               !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+                       if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
+                           !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
                                goto found;
                        continue;
                default:
@@ -488,7 +494,7 @@ found:
        if (target_mask & 0x1) {
                ia64_itr(0x1, i, va, pte, log_size);
                ia64_srlz_i();
-               p = &__per_cpu_idtrs[cpu][0][i];
+               p = ia64_idtrs[cpu] + i;
                p->ifa = va;
                p->pte = pte;
                p->itir = log_size << 2;
@@ -497,7 +503,7 @@ found:
        if (target_mask & 0x2) {
                ia64_itr(0x2, i, va, pte, log_size);
                ia64_srlz_i();
-               p = &__per_cpu_idtrs[cpu][1][i];
+               p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
                p->ifa = va;
                p->pte = pte;
                p->itir = log_size << 2;
@@ -528,7 +534,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
                return;
 
        if (target_mask & 0x1) {
-               p = &__per_cpu_idtrs[cpu][0][slot];
+               p = ia64_idtrs[cpu] + slot;
                if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
                        p->pte = 0;
                        ia64_ptr(0x1, p->ifa, p->itir>>2);
@@ -537,7 +543,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
        }
 
        if (target_mask & 0x2) {
-               p = &__per_cpu_idtrs[cpu][1][slot];
+               p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
                if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
                        p->pte = 0;
                        ia64_ptr(0x2, p->ifa, p->itir>>2);
@@ -546,8 +552,8 @@ void ia64_ptr_entry(u64 target_mask, int slot)
        }
 
        for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
-               if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
-                               (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+               if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
+                   ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
                        break;
        }
        per_cpu(ia64_tr_used, cpu) = i;
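
The translation pattern throughout this file: the lazily allocated buffer holds 2 * IA64_TR_ALLOC_MAX entries, instruction TRs in the first half and data TRs in the second, so every former __per_cpu_idtrs[cpu][0][i] becomes ia64_idtrs[cpu] + i and every __per_cpu_idtrs[cpu][1][i] becomes ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i. A hypothetical accessor (not in the tree) capturing that layout:

    static inline struct ia64_tr_entry *tr_slot(int cpu, int half, int i)
    {
            /* half 0 = instruction TRs, half 1 = data TRs */
            return ia64_idtrs[cpu] + half * IA64_TR_ALLOC_MAX + i;
    }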