* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel:
sched: add arch_update_cpu_topology hook.
sched: add exported arch_reinit_sched_domains() to header file.
sched: remove double unlikely from schedule()
sched: cleanup old and rarely used 'debug' features.
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/ioport.h>
+#include <linux/suspend.h>
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/gart.h>
printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
aper_size >> 10, __pa(p));
insert_aperture_resource((u32)__pa(p), aper_size);
+ register_nosave_region((u32)__pa(p) >> PAGE_SHIFT,
+ (u32)__pa(p+aper_size) >> PAGE_SHIFT);
return (u32)__pa(p);
}
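The two added lines hand register_nosave_region() page frame numbers, not byte addresses. A user-space toy of the __pa()/PAGE_SHIFT arithmetic (made-up base and size, 4K pages assumed):

#include <stdio.h>

/* Toy model of the PFN conversion above; the 64MB base and 32MB
 * aperture size are made-up stand-ins for __pa(p) and aper_size. */
#define PAGE_SHIFT 12

int main(void)
{
	unsigned long long pa = 0x4000000ULL;		/* stand-in for __pa(p) */
	unsigned long long aper_size = 0x2000000ULL;	/* 32MB aperture */

	printf("nosave pfns: %#llx-%#llx\n",
	       pa >> PAGE_SHIFT, (pa + aper_size) >> PAGE_SHIFT);
	return 0;
}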
trim_size = end_pfn;
trim_size <<= PAGE_SHIFT;
trim_size -= trim_start;
- add_memory_region(trim_start, trim_size, E820_RESERVED);
+ update_memory_range(trim_start, trim_size, E820_RAM,
+ E820_RESERVED);
update_e820();
return 1;
}
return 0;
}
early_param("memmap", parse_memmap);
+void __init update_memory_range(u64 start, u64 size, unsigned old_type,
+ unsigned new_type)
+{
+ int i;
+
+ BUG_ON(old_type == new_type);
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ u64 final_start, final_end;
+ if (ei->type != old_type)
+ continue;
+ /* totally covered? */
+ if (ei->addr >= start && ei->addr + ei->size <= start + size) {
+ ei->type = new_type;
+ continue;
+ }
+ /* partially covered */
+ final_start = max(start, ei->addr);
+ final_end = min(start + size, ei->addr + ei->size);
+ if (final_start >= final_end)
+ continue;
+ add_memory_region(final_start, final_end - final_start,
+ new_type);
+ }
+}
void __init update_e820(void)
{
u8 nr_map;
}
}
+void __init update_memory_range(u64 start, u64 size, unsigned old_type,
+ unsigned new_type)
+{
+ int i;
+
+ BUG_ON(old_type == new_type);
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ u64 final_start, final_end;
+ if (ei->type != old_type)
+ continue;
+ /* totally covered? */
+ if (ei->addr >= start && ei->addr + ei->size <= start + size) {
+ ei->type = new_type;
+ continue;
+ }
+ /* partially covered */
+ final_start = max(start, ei->addr);
+ final_end = min(start + size, ei->addr + ei->size);
+ if (final_start >= final_end)
+ continue;
+ add_memory_region(final_start, final_end - final_start,
+ new_type);
+ }
+}
+
void __init update_e820(void)
{
u8 nr_map;
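The interesting case in update_memory_range() is a range that only partially overlaps an existing entry: the overlap is clamped with max()/min() and added as a new region of the new type, while fully covered entries are retyped in place. A user-space sketch of that clamping (toy map, hypothetical helper names, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Toy model of the partial-coverage arithmetic: one existing RAM
 * entry at 1MB-4MB, and a reserve request for 2MB-6MB. */
static uint64_t max64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t ei_addr = 0x100000, ei_size = 0x300000;	/* existing RAM */
	uint64_t start = 0x200000, size = 0x400000;		/* request */

	uint64_t final_start = max64(start, ei_addr);
	uint64_t final_end = min64(start + size, ei_addr + ei_size);

	if (final_start < final_end)
		printf("new E820_RESERVED entry: %#llx-%#llx\n",
		       (unsigned long long)final_start,
		       (unsigned long long)final_end);
	return 0;
}

This prints 0x200000-0x400000: only the overlap changes type. The resulting overlap with the original entry is resolved when the map is sanitized via update_e820(), as the caller above does.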
.asciz "Unknown interrupt or fault at EIP %p %p %p\n"
fault_msg:
- .ascii \
+ .asciz \
/* fault info: */ "BUG: Int %d: CR2 %p\n" \
/* pusha regs: */ " EDI %p ESI %p EBP %p ESP %p\n" \
" EBX %p EDX %p ECX %p EAX %p\n" \
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
+#include <linux/bootmem.h>
+#include <asm/proto.h>
#include <asm/io.h>
#include <asm/gart.h>
#include <asm/calgary.h>
int node;
node = dev_to_node(dev);
- if (node == -1)
- node = numa_node_id();
-
- if (node < first_node(node_online_map))
- node = first_node(node_online_map);
page = alloc_pages_node(node, gfp, order);
return page ? page_address(page) : NULL;
}
early_param("iommu", iommu_setup);
+static __initdata void *dma32_bootmem_ptr;
+static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
+
+static int __init parse_dma32_size_opt(char *p)
+{
+ if (!p)
+ return -EINVAL;
+ dma32_bootmem_size = memparse(p, &p);
+ return 0;
+}
+early_param("dma32_size", parse_dma32_size_opt);
+
+void __init dma32_reserve_bootmem(void)
+{
+ unsigned long size, align;
+ if (end_pfn <= MAX_DMA32_PFN)
+ return;
+
+ align = 64ULL<<20;
+ size = round_up(dma32_bootmem_size, align);
+ dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
+ __pa(MAX_DMA_ADDRESS));
+ if (dma32_bootmem_ptr)
+ dma32_bootmem_size = size;
+ else
+ dma32_bootmem_size = 0;
+}
+static void __init dma32_free_bootmem(void)
+{
+ int node;
+
+ if (end_pfn <= MAX_DMA32_PFN)
+ return;
+
+ if (!dma32_bootmem_ptr)
+ return;
+
+ for_each_online_node(node)
+ free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
+ dma32_bootmem_size);
+
+ dma32_bootmem_ptr = NULL;
+ dma32_bootmem_size = 0;
+}
+
void __init pci_iommu_alloc(void)
{
+ /* Free the range so the IOMMU can allocate from below 4GB. */
+ dma32_free_bootmem();
/*
* The order of these functions is important for
* fall-back/fail-over reasons
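The dma32_size= option accepts the usual memparse() suffixes. A user-space toy of that suffix handling (simplified re-implementation, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

/* Toy re-implementation of memparse()'s K/M/G suffix handling,
 * to show what booting with dma32_size=256M yields. */
static unsigned long long memparse_toy(const char *p)
{
	char *end;
	unsigned long long v = strtoull(p, &end, 0);

	switch (*end) {
	case 'G': case 'g':
		v <<= 10;
		/* fall through */
	case 'M': case 'm':
		v <<= 10;
		/* fall through */
	case 'K': case 'k':
		v <<= 10;
	}
	return v;
}

int main(void)
{
	printf("%llu\n", memparse_toy("256M"));	/* 268435456 */
	return 0;
}

The reserve-early/free-late pairing exists because bootmem allocations made later in boot would otherwise consume the memory below 4GB that the GART and swiotlb code needs; dma32_free_bootmem() returns the hoarded range immediately before pci_iommu_alloc() hands it to those users.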
nvidia_force_enable_hpet);
/* LPC bridges */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
+ nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
DMI_MATCH(DMI_BOARD_NAME, "0WF810"),
},
},
+ { /* Handle problems with rebooting on Dell OptiPlex 745's DFF */
+ .callback = set_bios_reboot,
+ .ident = "Dell OptiPlex 745",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+ DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
+ },
+ },
+ { /* Handle problems with rebooting on Dell OptiPlex 745 with 0KW626 */
+ .callback = set_bios_reboot,
+ .ident = "Dell OptiPlex 745",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+ DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
+ },
+ },
{ /* Handle problems with rebooting on Dell 2400's */
.callback = set_bios_reboot,
.ident = "Dell PowerEdge 2400",
printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
for_each_cpu_mask (i, cpu_possible_map) {
char *ptr;
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+ ptr = alloc_bootmem_pages(size);
+#else
+ int node = early_cpu_to_node(i);
- if (!NODE_DATA(early_cpu_to_node(i))) {
- printk("cpu with no node %d, num_online_nodes %d\n",
- i, num_online_nodes());
+ if (!node_online(node) || !NODE_DATA(node))
ptr = alloc_bootmem_pages(size);
- } else {
- ptr = alloc_bootmem_pages_node(NODE_DATA(early_cpu_to_node(i)), size);
- }
+ else
+ ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
+#endif
if (!ptr)
panic("Cannot allocate cpu data for CPU %d\n", i);
cpu_pda(i)->data_offset = ptr - __per_cpu_start;
early_res_to_bootmem();
+ dma32_reserve_bootmem();
+
#ifdef CONFIG_ACPI_SLEEP
/*
* Reserve low memory region for sleep support.
*/
set_fixmap(FIX_APIC_BASE, APIC_DEFAULT_PHYS_BASE);
setup_local_APIC();
- printk(KERN_INFO "Local APIC Version %#lx, ID %#lx\n",
- apic_read(APIC_LVR), apic_read(APIC_ID));
+ printk(KERN_INFO "Local APIC Version %#x, ID %#x\n",
+ (unsigned int)apic_read(APIC_LVR),
+ (unsigned int)apic_read(APIC_ID));
set_fixmap(FIX_CO_CPU, CO_CPU_PHYS);
set_fixmap(FIX_CO_APIC, CO_APIC_PHYS);
bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
if (bootmap == NULL) {
if (nodedata_phys < start || nodedata_phys >= end)
- free_bootmem((unsigned long)node_data[nodeid],
- pgdat_size);
+ free_bootmem(nodedata_phys, pgdat_size);
node_data[nodeid] = NULL;
return;
}
({ \
__typeof__(*(ptr)) __ret; \
if (likely(boot_cpu_data.x86 > 3)) \
- __ret = __cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))); \
+ __ret = (__typeof__(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(o), (unsigned long)(n), \
+ sizeof(*(ptr))); \
else \
- __ret = cmpxchg_386((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))); \
+ __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
+ (unsigned long)(o), (unsigned long)(n), \
+ sizeof(*(ptr))); \
__ret; \
})
#define cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
if (likely(boot_cpu_data.x86 > 3)) \
- __ret = __cmpxchg_local((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))); \
+ __ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \
+ (unsigned long)(o), (unsigned long)(n), \
+ sizeof(*(ptr))); \
else \
- __ret = cmpxchg_386((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))); \
+ __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
+ (unsigned long)(o), (unsigned long)(n), \
+ sizeof(*(ptr))); \
__ret; \
})
#endif
({ \
__typeof__(*(ptr)) __ret; \
if (likely(boot_cpu_data.x86 > 4)) \
- __ret = __cmpxchg64((ptr), (unsigned long long)(o), \
+ __ret = (__typeof__(*(ptr)))__cmpxchg64((ptr), \
+ (unsigned long long)(o), \
(unsigned long long)(n)); \
else \
- __ret = cmpxchg_486_u64((ptr), (unsigned long long)(o), \
+ __ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
+ (unsigned long long)(o), \
(unsigned long long)(n)); \
__ret; \
})
({ \
__typeof__(*(ptr)) __ret; \
if (likely(boot_cpu_data.x86 > 4)) \
- __ret = __cmpxchg64_local((ptr), (unsigned long long)(o), \
+ __ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
+ (unsigned long long)(o), \
(unsigned long long)(n)); \
else \
- __ret = cmpxchg_486_u64((ptr), (unsigned long long)(o), \
+ __ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
+ (unsigned long long)(o), \
(unsigned long long)(n)); \
__ret; \
})
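The casts added above matter because the __cmpxchg helpers traffic in unsigned long, while callers expect a value of the pointee's type back. A user-space sketch of the same macro pattern (simplified, non-atomic stand-in, hypothetical names):

#include <stdio.h>

/* Simplified, non-atomic stand-in for __cmpxchg(): works on
 * unsigned long internally, like the real helper. */
static unsigned long raw_cmpxchg(volatile void *ptr, unsigned long old,
				 unsigned long new, int size)
{
	unsigned long cur = 0;

	switch (size) {
	case 2: {
		volatile unsigned short *p = ptr;
		cur = *p;
		if (cur == old)
			*p = (unsigned short)new;
		break;
	}
	case 4: {
		volatile unsigned int *p = ptr;
		cur = *p;
		if (cur == old)
			*p = (unsigned int)new;
		break;
	}
	}
	return cur;
}

/* The cast back to __typeof__(*(ptr)) is the point of the patch. */
#define cmpxchg_toy(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))raw_cmpxchg((ptr),			\
			(unsigned long)(o), (unsigned long)(n),		\
			sizeof(*(ptr)));				\
	__ret;								\
})

int main(void)
{
	unsigned short v = 42;
	unsigned short old = cmpxchg_toy(&v, 42, 7);

	printf("old=%u new=%u\n", old, v);	/* old=42 new=7 */
	return 0;
}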
extern void register_bootmem_low_pages(unsigned long max_low_pfn);
extern void add_memory_region(unsigned long long start,
unsigned long long size, int type);
+extern void update_memory_range(u64 start, u64 size, unsigned old_type,
+ unsigned new_type);
extern void e820_register_memory(void);
extern void limit_regions(unsigned long long size);
extern void print_memory_map(char *who);
unsigned size, unsigned long align);
extern void add_memory_region(unsigned long start, unsigned long size,
int type);
+extern void update_memory_range(u64 start, u64 size, unsigned old_type,
+ unsigned new_type);
extern void setup_memory_region(void);
extern void contig_e820_setup(void);
extern unsigned long e820_end_of_ram(void);
struct page;
-static void inline clear_user_page(void *page, unsigned long vaddr,
+static inline void clear_user_page(void *page, unsigned long vaddr,
struct page *pg)
{
clear_page(page);
}
-static void inline copy_user_page(void *to, void *from, unsigned long vaddr,
+static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
struct page *topage)
{
copy_page(to, from);
+extern void dma32_reserve_bootmem(void);
extern void pci_iommu_alloc(void);
/* The PCI address space does equal the physical memory
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
*
- * Note: there are no guarantees that this function will not be reordered
- * on non-x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
* @nr: Bit to change
* @addr: Address to start counting from
*
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
+ * sync_change_bit() is atomic and may not be reordered.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
* It also implies a memory barrier.
*/
static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
* It also implies a memory barrier.
*/
static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
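For reference, the sync_ variants always use the lock prefix, independent of CONFIG_SMP, because the memory may be shared with an agent outside this kernel image (a hypervisor, for instance); hence the comment fixes above dropping the "may be reordered" caveats. A user-space analog using GCC's __sync builtins (toy names, not the kernel API):

#include <stdio.h>

/* User-space analog of sync_test_and_set_bit()/sync_test_and_clear_bit():
 * atomic read-modify-write with a full barrier, returning the old bit. */
static int toy_test_and_set_bit(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << nr;

	return (__sync_fetch_and_or(addr, mask) & mask) != 0;
}

static int toy_test_and_clear_bit(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << nr;

	return (__sync_fetch_and_and(addr, ~mask) & mask) != 0;
}

int main(void)
{
	unsigned long flags = 0;

	printf("%d\n", toy_test_and_set_bit(3, &flags));	/* 0: was clear */
	printf("%d\n", toy_test_and_set_bit(3, &flags));	/* 1: already set */
	printf("%d\n", toy_test_and_clear_bit(3, &flags));	/* 1: was set */
	return 0;
}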