/* DS: data, read/write, 4 GB, base 0 */
[GDT_ENTRY_BOOT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff),
};
- struct gdt_ptr gdt;
+ /* Xen HVM incorrectly stores a pointer to the gdt_ptr instead
+ of the gdt_ptr contents. Thus, make it static so it will
+ stay in memory, at least long enough that we switch to the
+ proper kernel GDT. */
+ static struct gdt_ptr gdt;
gdt.len = sizeof(boot_gdt)-1;
gdt.ptr = (u32)&boot_gdt + (ds() << 4);
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/sal.h>
+#include <asm/hpsim.h>
#include "hpsim_ssc.h"
static void simcons_write (struct console *, const char *, unsigned);
static struct tty_driver *simcons_console_device (struct console *, int *);
-struct console hpsim_cons = {
+static struct console hpsim_cons = {
.name = "simcons",
.write = simcons_write,
.device = simcons_console_device,
static struct tty_driver *simcons_console_device (struct console *c, int *index)
{
- extern struct tty_driver *hp_simserial_driver;
*index = c->index;
return hp_simserial_driver;
}
+
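+/*
+ * Register the simulator console. Returns 0 on success, nonzero when the
+ * platform is not hpsim or the console is already enabled.
+ */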
+int simcons_register(void)
+{
+ if (!ia64_platform_is("hpsim"))
+ return 1;
+
+ if (hpsim_cons.flags & CON_ENABLED)
+ return 1;
+
+ register_console(&hpsim_cons);
+ return 0;
+}
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/sal.h>
+#include <asm/hpsim.h>
#include "hpsim_ssc.h"
{
ROOT_DEV = Root_SDA1; /* default to first SCSI drive */
-#ifdef CONFIG_HP_SIMSERIAL_CONSOLE
- {
- extern struct console hpsim_cons;
- if (ia64_platform_is("hpsim"))
- register_console(&hpsim_cons);
- }
-#endif
+ simcons_register();
}
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/irq.h>
+#include <asm/hpsim.h>
+
+#include "hpsim_ssc.h"
#define SIMETH_RECV_MAX 10
#define SIMETH_FRAME_SIZE ETH_FRAME_LEN
-#define SSC_NETDEV_PROBE 100
-#define SSC_NETDEV_SEND 101
-#define SSC_NETDEV_RECV 102
-#define SSC_NETDEV_ATTACH 103
-#define SSC_NETDEV_DETACH 104
-
#define NETWORK_INTR 8
struct simeth_local {
return r;
}
-extern long ia64_ssc (long, long, long, long, int);
-extern void ia64_ssc_connect_irq (long intr, long irq);
-
static inline int
netdev_probe(char *name, unsigned char *ether)
{
#include <linux/kernel.h>
#include <linux/timer.h>
#include <asm/irq.h>
+#include "hpsim_ssc.h"
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
unsigned count;
};
-extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
-
static int desc[16] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
};
DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
- [0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
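+ /* -1 means the vector is not mapped to any IRQ */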
+ [0 ... IA64_NUM_VECTORS - 1] = -1
};
static cpumask_t vector_table[IA64_NUM_VECTORS] = {
domain = cfg->domain;
cpus_and(mask, cfg->domain, cpu_online_map);
for_each_cpu_mask(cpu, mask)
- per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
+ per_cpu(vector_irq, cpu)[vector] = -1;
cfg->vector = IRQ_VECTOR_UNASSIGNED;
cfg->domain = CPU_MASK_NONE;
irq_status[irq] = IRQ_UNUSED;
/* Clear vector_irq */
for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
- per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
+ per_cpu(vector_irq, cpu)[vector] = -1;
/* Mark the inuse vectors */
for (irq = 0; irq < NR_IRQS; ++irq) {
if (!cpu_isset(cpu, irq_cfg[irq].domain))
} else if (unlikely(IS_RESCHEDULE(vector)))
kstat_this_cpu.irqs[vector]++;
else {
+ int irq = local_vector_to_irq(vector);
+
ia64_setreg(_IA64_REG_CR_TPR, vector);
ia64_srlz_d();
- generic_handle_irq(local_vector_to_irq(vector));
+ if (unlikely(irq < 0)) {
+ printk(KERN_ERR "%s: Unexpected interrupt "
+ "vector %d on CPU %d is not mapped "
+ "to any IRQ!\n", __FUNCTION__, vector,
+ smp_processor_id());
+ } else
+ generic_handle_irq(irq);
/*
* Disable interrupts and send EOI:
kstat_this_cpu.irqs[vector]++;
else {
struct pt_regs *old_regs = set_irq_regs(NULL);
+ int irq = local_vector_to_irq(vector);
ia64_setreg(_IA64_REG_CR_TPR, vector);
ia64_srlz_d();
 * it will work. I hope it works!
 * We could probably share this code.
*/
- vectors_in_migration[local_vector_to_irq(vector)]=0;
- generic_handle_irq(local_vector_to_irq(vector));
+ if (unlikely(irq < 0)) {
+ printk(KERN_ERR "%s: Unexpected interrupt "
+ "vector %d on CPU %d is not mapped "
+ "to any IRQ!\n", __FUNCTION__, vector,
+ smp_processor_id());
+ } else {
+ vectors_in_migration[irq] = 0;
+ generic_handle_irq(irq);
+ }
set_irq_regs(old_regs);
/*
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>
+#include <asm/hpsim.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
if (!efi_setup_pcdp_console(cmdline))
earlycons++;
#endif
-#ifdef CONFIG_HP_SIMSERIAL_CONSOLE
- {
- extern struct console hpsim_cons;
- register_console(&hpsim_cons);
+ if (!simcons_register())
earlycons++;
- }
-#endif
return (earlycons) ? 0 : -1;
}
/* clear TPR & XTP to enable all interrupt classes: */
ia64_setreg(_IA64_REG_CR_TPR, 0);
+
+ /* Clear any pending interrupts left by SAL/EFI */
+ while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
+ ia64_eoi();
+
#ifdef CONFIG_SMP
normal_xtp();
#endif
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
+#include <asm/sn/arch.h>
#define SMP_DEBUG 0
return (-EBUSY);
}
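+ /* give the SN2 platform code a chance to veto taking this cpu offline */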
+ if (ia64_platform_is("sn2")) {
+ if (!sn_cpu_disable_allowed(cpu))
+ return -EBUSY;
+ }
+
cpu_clear(cpu, cpu_online_map);
if (migrate_platform_irqs(cpu)) {
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/mm.h>
+#include <linux/nmi.h>
#include <linux/swap.h>
#include <asm/meminit.h>
present = pgdat->node_present_pages;
for(i = 0; i < pgdat->node_spanned_pages; i++) {
struct page *page;
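+ /* touch the NMI watchdog; scanning every page can take a long time */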
+ if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
+ touch_nmi_watchdog();
if (pfn_valid(pgdat->node_start_pfn + i))
page = pfn_to_page(pgdat->node_start_pfn + i);
else {
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
present = pgdat->node_present_pages;
for(i = 0; i < pgdat->node_spanned_pages; i++) {
struct page *page;
+ if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
+ touch_nmi_watchdog();
if (pfn_valid(pgdat->node_start_pfn + i))
page = pfn_to_page(pgdat->node_start_pfn + i);
else {
*/
void hub_error_init(struct hubdev_info *hubdev_info)
{
if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED,
- "SN_hub_error", (void *)hubdev_info))
+ "SN_hub_error", (void *)hubdev_info)) {
printk("hub_error_init: Failed to request_irq for 0x%p\n",
hubdev_info);
- return;
+ return;
+ }
+ sn_set_err_irq_affinity(SGI_II_ERROR);
}
*/
void ice_error_init(struct hubdev_info *hubdev_info)
{
if (request_irq
(SGI_TIO_ERROR, (void *)hub_eint_handler, IRQF_SHARED, "SN_TIO_error",
- (void *)hubdev_info))
+ (void *)hubdev_info)) {
printk("ice_error_init: request_irq() error hubdev_info 0x%p\n",
hubdev_info);
- return;
+ return;
+ }
+ sn_set_err_irq_affinity(SGI_TIO_ERROR);
}
#include <asm/sn/pcidev.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
+#include <asm/sn/sn_feature_sets.h>
static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
(void)sn_retarget_vector(sn_irq_info, nasid, slice);
}
+#ifdef CONFIG_SMP
+void sn_set_err_irq_affinity(unsigned int irq)
+{
+ /*
+ * On systems which support CPU disabling (SHub2), all error interrupts
+ * are targeted at the boot CPU.
+ */
+ if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
+ set_irq_affinity_info(irq, cpu_physical_id(0), 0);
+}
+#else
+void sn_set_err_irq_affinity(unsigned int irq) { }
+#endif
+
static void
sn_mask_irq(unsigned int irq)
{
#include <asm/sn/shub_mmr.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/rw_mmr.h>
+#include <asm/sn/sn_feature_sets.h>
DEFINE_PER_CPU(struct ptc_stats, ptcstats);
DECLARE_PER_CPU(struct ptc_stats, ptcstats);
sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
}
+#ifdef CONFIG_HOTPLUG_CPU
+/**
+ * sn_cpu_disable_allowed - Determine if a CPU can be disabled.
+ * @cpu: CPU that is requested to be disabled.
+ *
+ * CPU disable is only allowed on SHub2 systems running with a PROM
+ * that supports CPU disable. It is not permitted to disable the boot processor.
+ */
+bool sn_cpu_disable_allowed(int cpu)
+{
+ if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) {
+ if (cpu != 0)
+ return true;
+ else
+ printk(KERN_WARNING
+ "Disabling the boot processor is not allowed.\n");
+ } else
+ printk(KERN_WARNING
+ "CPU disable is not supported on this system.\n");
+
+ return false;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
#ifdef CONFIG_PROC_FS
#define PTC_BASENAME "sgi_sn/ptc_statistics"
}
sz = sn_hwperf_obj_cnt * sizeof(struct sn_hwperf_object_info);
- if ((objbuf = (struct sn_hwperf_object_info *) vmalloc(sz)) == NULL) {
+ objbuf = vmalloc(sz);
+ if (objbuf == NULL) {
printk("sn_hwperf_enum_objects: vmalloc(%d) failed\n", (int)sz);
e = -ENOMEM;
goto out;
printk(KERN_WARNING
"pcibr cannot allocate interrupt for error handler\n");
}
+ sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);
/*
* Update the Bridge with the "kernel" pagesize
__FUNCTION__, SGI_TIOCA_ERROR,
(int)tioca_common->ca_common.bs_persist_busnum);
+ sn_set_err_irq_affinity(SGI_TIOCA_ERROR);
+
/* Setup locality information */
controller->node = tioca_kern->ca_closest_node;
return tioca_common;
tioce_common->ce_pcibus.bs_persist_segment,
tioce_common->ce_pcibus.bs_persist_busnum);
+ sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);
return tioce_common;
}
struct device_node *dn)
{
struct ibmebus_dev *dev;
- const char *loc_code;
- int length;
-
- loc_code = of_get_property(dn, "ibm,loc-code", NULL);
- if (!loc_code) {
- printk(KERN_WARNING "%s: node %s missing 'ibm,loc-code'\n",
- __FUNCTION__, dn->name ? dn->name : "<unknown>");
- return ERR_PTR(-EINVAL);
- }
-
- if (strlen(loc_code) == 0) {
- printk(KERN_WARNING "%s: 'ibm,loc-code' is invalid\n",
- __FUNCTION__);
- return ERR_PTR(-EINVAL);
- }
+ int i, len, bus_len;
dev = kzalloc(sizeof(struct ibmebus_dev), GFP_KERNEL);
- if (!dev) {
+ if (!dev)
return ERR_PTR(-ENOMEM);
- }
dev->ofdev.node = of_node_get(dn);
- length = strlen(loc_code);
- memcpy(dev->ofdev.dev.bus_id, loc_code
- + (length - min(length, BUS_ID_SIZE - 1)),
- min(length, BUS_ID_SIZE - 1));
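+ /*
+ * Build the bus id from the full device-tree path, skipping the
+ * leading '/' and keeping only the tail that fits in BUS_ID_SIZE.
+ */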
+ len = strlen(dn->full_name + 1);
+ bus_len = min(len, BUS_ID_SIZE - 1);
+ memcpy(dev->ofdev.dev.bus_id, dn->full_name + 1
+ + (len - bus_len), bus_len);
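+ /* a bus id must not contain '/', so map path separators to '_' */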
+ for (i = 0; i < bus_len; i++)
+ if (dev->ofdev.dev.bus_id[i] == '/')
+ dev->ofdev.dev.bus_id[i] = '_';
/* Register with generic device framework. */
if (ibmebus_register_device_common(dev, dn->name) != 0) {
spu_irq_class_0(int irq, void *data)
{
struct spu *spu;
+ unsigned long stat, mask;
spu = data;
- spu->class_0_pending = 1;
+
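+ /* latch only the unmasked class 0 status bits for the bottom half */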
+ mask = spu_int_mask_get(spu, 0);
+ stat = spu_int_stat_get(spu, 0);
+ stat &= mask;
+
+ spin_lock(&spu->register_lock);
+ spu->class_0_pending |= stat;
+ spin_unlock(&spu->register_lock);
+
spu->stop_callback(spu);
+ spu_int_stat_clear(spu, 0, stat);
+
return IRQ_HANDLED;
}
int
spu_irq_class_0_bottom(struct spu *spu)
{
- unsigned long stat, mask;
unsigned long flags;
-
- spu->class_0_pending = 0;
+ unsigned long stat;
spin_lock_irqsave(&spu->register_lock, flags);
- mask = spu_int_mask_get(spu, 0);
- stat = spu_int_stat_get(spu, 0);
-
- stat &= mask;
+ stat = spu->class_0_pending;
+ spu->class_0_pending = 0;
if (stat & 1) /* invalid DMA alignment */
__spu_trap_dma_align(spu);
if (stat & 4) /* error on SPU */
__spu_trap_error(spu);
- spu_int_stat_clear(spu, 0, stat);
spin_unlock_irqrestore(&spu->register_lock, flags);
return (stat & 0x7) ? -EIO : 0;
PS3_DEV_TYPE_STOR_ROM = TYPE_ROM, /* 5 */
PS3_DEV_TYPE_SB_GPIO = 6,
PS3_DEV_TYPE_STOR_FLASH = TYPE_RBC, /* 14 */
+ PS3_DEV_TYPE_STOR_DUMMY = 32,
PS3_DEV_TYPE_NOACCESS = 255,
};
return result;
}
+ if (tmp.bus_type == PS3_BUS_TYPE_STORAGE) {
+ /*
+ * A storage device may show up in the repository before the
+ * hypervisor has finished probing its type and regions
+ */
+ unsigned int num_regions;
+
+ if (tmp.dev_type == PS3_DEV_TYPE_STOR_DUMMY) {
+ pr_debug("%s:%u storage device not ready\n", __func__,
+ __LINE__);
+ return -ENODEV;
+ }
+
+ result = ps3_repository_read_stor_dev_num_regions(tmp.bus_index,
+ tmp.dev_index,
+ &num_regions);
+ if (result) {
+ pr_debug("%s:%d read_stor_dev_num_regions failed\n",
+ __func__, __LINE__);
+ return result;
+ }
+
+ if (!num_regions) {
+ pr_debug("%s:%u storage device has no regions yet\n",
+ __func__, __LINE__);
+ return -ENODEV;
+ }
+ }
+
result = ps3_repository_read_dev_id(tmp.bus_index, tmp.dev_index,
&tmp.dev_id);
static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK
| MFC_STATE1_PROBLEM_STATE_MASK);
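+ /* the master run control bit is always forced on before sr1 is checked and cached */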
+ sr1 |= MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+
BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));
spu_pdata(spu)->cache.sr1 = sr1;
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
},
},
- { }
+
+ { } /* terminate list */
};
static const char *oemstrs[] = {
"Tecra M3,",
DMI_MATCH(DMI_PRODUCT_NAME, "M570U"),
},
},
+
+ { } /* terminate list */
};
u32 iocfg;
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };
- pci_write_config_byte(pdev, PATA_UDMA_TIMING, udma_bits[adev->pio_mode - XFER_UDMA_0]);
+ pci_write_config_byte(pdev, PATA_UDMA_TIMING, udma_bits[adev->dma_mode - XFER_UDMA_0]);
}
static const unsigned int svia_bar_sizes[] = {
DEBUG(1, "MTDSB: New superblock for device %d (\"%s\")\n",
mtd->index, mtd->name);
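+ /* set the mount flags on the superblock before fill_super sees it */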
+ sb->s_flags = flags;
+
ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
if (ret < 0) {
up_write(&sb->s_umount);
struct inode *inode = OFNI_EDONI_2SFFJ(f);
struct page *pg;
- pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
+ pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
(void *)jffs2_do_readpage_unlock, inode);
if (IS_ERR(pg))
return (void *)pg;
if (offset != 0)
return;
/* Cancel any unstarted writes on this page */
- nfs_wb_page_priority(page->mapping->host, page, FLUSH_INVALIDATE);
+ nfs_wb_page_cancel(page->mapping->host, page);
}
static int nfs_release_page(struct page *page, gfp_t gfp)
void nfs_release_automount_timer(void)
{
if (list_empty(&nfs_automount_list))
- cancel_delayed_work_sync(&nfs_automount_task);
+ cancel_delayed_work(&nfs_automount_task);
}
/*
rcu_read_lock();
delegation = rcu_dereference(NFS_I(state->inode)->delegation);
if (delegation != NULL && (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) != 0)
- delegation_type = delegation->flags;
+ delegation_type = delegation->type;
rcu_read_unlock();
opendata->o_arg.u.delegation_type = delegation_type;
status = nfs4_open_recover(opendata, state);
}
res = d_add_unique(dentry, igrab(state->inode));
if (res != NULL)
- dentry = res;
+ path.dentry = res;
nfs4_intent_set_file(nd, &path, state);
return res;
}
kfree(string);
switch (token) {
- case Opt_udp:
+ case Opt_xprt_udp:
mnt->flags &= ~NFS_MOUNT_TCP;
mnt->nfs_server.protocol = IPPROTO_UDP;
mnt->timeo = 7;
mnt->retrans = 5;
break;
- case Opt_tcp:
+ case Opt_xprt_tcp:
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = IPPROTO_TCP;
mnt->timeo = 600;
kfree(string);
switch (token) {
- case Opt_udp:
+ case Opt_xprt_udp:
mnt->mount_server.protocol = IPPROTO_UDP;
break;
- case Opt_tcp:
+ case Opt_xprt_tcp:
mnt->mount_server.protocol = IPPROTO_TCP;
break;
default:
c = strchr(dev_name, ':');
if (c == NULL)
return -EINVAL;
- len = c - dev_name - 1;
+ len = c - dev_name;
if (len > sizeof(data->hostname))
- return -EINVAL;
+ return -ENAMETOOLONG;
strncpy(data->hostname, dev_name, len);
args.nfs_server.hostname = data->hostname;
c++;
if (strlen(c) > NFS_MAXPATHLEN)
- return -EINVAL;
+ return -ENAMETOOLONG;
args.nfs_server.export_path = c;
status = nfs_try_mount(&args, mntfh);
if (status)
- return -EINVAL;
+ return status;
/*
* Translate to nfs_mount_data, which nfs_fill_super
/* while calculating len, pretend ':' is '\0' */
len = c - dev_name;
if (len > NFS4_MAXNAMLEN)
- return -EINVAL;
+ return -ENAMETOOLONG;
*hostname = kzalloc(len, GFP_KERNEL);
if (*hostname == NULL)
return -ENOMEM;
c++; /* step over the ':' */
len = strlen(c);
if (len > NFS4_MAXPATHLEN)
- return -EINVAL;
+ return -ENAMETOOLONG;
*mntpath = kzalloc(len + 1, GFP_KERNEL);
if (*mntpath == NULL)
return -ENOMEM;
return ret;
}
+int nfs_wb_page_cancel(struct inode *inode, struct page *page)
+{
+ struct nfs_page *req;
+ loff_t range_start = page_offset(page);
+ loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+ struct writeback_control wbc = {
+ .bdi = page->mapping->backing_dev_info,
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = LONG_MAX,
+ .range_start = range_start,
+ .range_end = range_end,
+ };
+ int ret = 0;
+
+ BUG_ON(!PageLocked(page));
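+ /* retry until the request is left for commit, removed, or gone */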
+ for (;;) {
+ req = nfs_page_find_request(page);
+ if (req == NULL)
+ goto out;
+ if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+ nfs_release_request(req);
+ break;
+ }
+ if (nfs_lock_request_dontget(req)) {
+ nfs_inode_remove_request(req);
+ /*
+ * In case nfs_inode_remove_request has marked the
+ * page as being dirty
+ */
+ cancel_dirty_page(page, PAGE_CACHE_SIZE);
+ nfs_unlock_request(req);
+ break;
+ }
+ ret = nfs_wait_on_request(req);
+ if (ret < 0)
+ goto out;
+ }
+ if (!PagePrivate(page))
+ return 0;
+ ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
+out:
+ return ret;
+}
+
int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
loff_t range_start = page_offset(page);
static inline int
kmem_shake_allow(gfp_t gfp_mask)
{
- return (gfp_mask & __GFP_WAIT);
+ return (gfp_mask & __GFP_WAIT) != 0;
}
#endif /* __XFS_SUPPORT_KMEM_H__ */
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
- size_t pg_offset, len = 0;
+ size_t pg_offset, pg_len = 0;
if (tindex == tlast) {
pg_offset =
pg_offset = PAGE_CACHE_SIZE;
if (page->index == tindex && !TestSetPageLocked(page)) {
- len = xfs_probe_page(page, pg_offset, mapped);
+ pg_len = xfs_probe_page(page, pg_offset, mapped);
unlock_page(page);
}
- if (!len) {
+ if (!pg_len) {
done = 1;
break;
}
- total += len;
+ total += pg_len;
tindex++;
}
.inherit_nosym = { 0, 0, 1 },
.rotorstep = { 1, 1, 255 },
.inherit_nodfrg = { 0, 1, 1 },
- .fstrm_timer = { 1, 50, 3600*100},
+ .fstrm_timer = { 1, 30*100, 3600*100},
};
/*
* Initialize the dquot hash tables.
*/
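+ /* kmem_zalloc_greedy takes its low/high bounds in bytes, not hash entries */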
udqhash = kmem_zalloc_greedy(&hsize,
- XFS_QM_HASHSIZE_LOW, XFS_QM_HASHSIZE_HIGH,
+ XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
+ XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
KM_SLEEP | KM_MAYFAIL | KM_LARGE);
gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
hsize /= sizeof(xfs_dqhash_t);
extern void assfail(char *expr, char *f, int l);
#define ASSERT_ALWAYS(expr) \
- (unlikely((expr) != 0) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+ (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#ifndef DEBUG
-# define ASSERT(expr) ((void)0)
+#define ASSERT(expr) ((void)0)
#ifndef STATIC
# define STATIC static noinline
#else /* DEBUG */
-# define ASSERT(expr) ASSERT_ALWAYS(expr)
-# include <linux/random.h>
+#include <linux/random.h>
+
+#define ASSERT(expr) \
+ (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#ifndef STATIC
# define STATIC noinline
error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
if (unlikely(error == EFSCORRUPTED)) {
if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
- int i;
cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n",
(long long)bno);
cmn_err(CE_ALERT, "dir: inode %lld\n",
}
cb = iclog->ic_callback;
- while (cb != 0) {
+ while (cb) {
iclog->ic_callback_tail = &(iclog->ic_callback);
iclog->ic_callback = NULL;
LOG_UNLOCK(log, s);
/* perform callbacks in the order given */
- for (; cb != 0; cb = cb_next) {
+ for (; cb; cb = cb_next) {
cb_next = cb->cb_next;
cb->cb_func(cb->cb_arg, aborted);
}
loopdidcallbacks++;
funcdidcallbacks++;
- ASSERT(iclog->ic_callback == 0);
+ ASSERT(iclog->ic_callback == NULL);
if (!(iclog->ic_state & XLOG_STATE_IOERROR))
iclog->ic_state = XLOG_STATE_DIRTY;
#else
/* When we debug, it is easier if tickets are cycled */
ticket->t_next = NULL;
- if (log->l_tail != 0) {
+ if (log->l_tail) {
log->l_tail->t_next = ticket;
} else {
- ASSERT(log->l_freelist == 0);
+ ASSERT(log->l_freelist == NULL);
log->l_freelist = ticket;
}
log->l_tail = ticket;
s = LOG_LOCK(log);
icptr = log->l_iclog;
for (i=0; i < log->l_iclog_bufs; i++) {
- if (icptr == 0)
+ if (icptr == NULL)
xlog_panic("xlog_verify_iclog: invalid ptr");
icptr = icptr->ic_next;
}
int old_len;
item = trans->r_itemq;
- if (item == 0) {
+ if (item == NULL) {
/* finish copying rest of trans header */
xlog_recover_add_item(&trans->r_itemq);
ptr = (xfs_caddr_t) &trans->r_theader +
if (!len)
return 0;
item = trans->r_itemq;
- if (item == 0) {
+ if (item == NULL) {
ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
if (len == sizeof(xfs_trans_header_t))
xlog_recover_add_item(&trans->r_itemq);
xlog_recover_t *tp;
int found = 0;
- ASSERT(trans != 0);
+ ASSERT(trans != NULL);
if (trans == *q) {
*q = (*q)->r_next;
} else {
tp = *q;
- while (tp != 0) {
+ while (tp) {
if (tp->r_next == trans) {
found = 1;
break;
xlog_recover_item_t **q,
xlog_recover_item_t *item)
{
- if (*q == 0) {
+ if (*q == NULL) {
item->ri_prev = item->ri_next = item;
*q = item;
} else {
break;
nbits = xfs_contig_bits(data_map, map_size, bit);
ASSERT(nbits > 0);
- ASSERT(item->ri_buf[i].i_addr != 0);
+ ASSERT(item->ri_buf[i].i_addr != NULL);
ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
ASSERT(XFS_BUF_COUNT(bp) >=
((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
--- /dev/null
+#ifndef _ASMIA64_HPSIM_H
+#define _ASMIA64_HPSIM_H
+
+#ifndef CONFIG_HP_SIMSERIAL_CONSOLE
+static inline int simcons_register(void) { return 1; }
+#else
+int simcons_register(void);
+#endif
+
+struct tty_driver;
+extern struct tty_driver *hp_simserial_driver;
+
+void ia64_ssc_connect_irq(long intr, long irq);
+void ia64_ctl_trace(long on);
+
+#endif
extern u8 sn_region_size;
extern void sn_flush_all_caches(long addr, long bytes);
+extern bool sn_cpu_disable_allowed(int cpu);
#endif /* _ASM_IA64_SN_ARCH_H */
int, nasid_t, int);
extern void sn_intr_free(nasid_t, int, struct sn_irq_info *);
extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int);
+extern void sn_set_err_irq_affinity(unsigned int);
extern struct list_head **sn_irq_lh;
#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
#define PRF_PAL_CACHE_FLUSH_SAFE 0
#define PRF_DEVICE_FLUSH_LIST 1
#define PRF_HOTPLUG_SUPPORT 2
+#define PRF_CPU_DISABLE_SUPPORT 3
/* --------------------- OS Features -------------------------------*/
u64 flags;
u64 dar;
u64 dsisr;
+ u64 class_0_pending;
size_t ls_size;
unsigned int slb_replace;
struct mm_struct *mm;
unsigned long long timestamp;
pid_t pid;
pid_t tgid;
- int class_0_pending;
spinlock_t register_lock;
void (* wbox_callback)(struct spu *spu);
extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_page(struct inode *inode, struct page* page);
extern int nfs_wb_page_priority(struct inode *inode, struct page* page, int how);
+extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_write_data *nfs_commit_alloc(void);
/*
* Shift right and round:
*/
-#define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
* Check whether we'd overflow the 64-bit multiplication:
*/
if (unlikely(tmp > WMULT_CONST))
- tmp = RSR(RSR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
+ tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
WMULT_SHIFT/2);
else
- tmp = RSR(tmp * lw->inv_weight, WMULT_SHIFT);
+ tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
static void set_load_weight(struct task_struct *p)
{
- task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
p->se.wait_runtime = 0;
if (task_has_rt_policy(p)) {
* a think about bumping its value to force at least one task to be
* moved
*/
- if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
+ if (*imbalance < busiest_load_per_task) {
unsigned long tmp, pwr_now, pwr_move;
unsigned int imbn;
pwr_move /= SCHED_LOAD_SCALE;
/* Move if we gain throughput */
- if (pwr_move <= pwr_now)
- goto out_balanced;
-
- *imbalance = busiest_load_per_task;
+ if (pwr_move > pwr_now)
+ *imbalance = busiest_load_per_task;
}
return busiest;
p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
#endif
p->se.sum_exec_runtime = 0;
+ p->se.prev_sum_exec_runtime = 0;
}
update_load_add(&cfs_rq->load, se->load.weight);
cfs_rq->nr_running++;
se->on_rq = 1;
+
+ schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}
static inline void
update_load_sub(&cfs_rq->load, se->load.weight);
cfs_rq->nr_running--;
se->on_rq = 0;
+
+ schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
}
static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
/*
* It will always fit into 'long':
*/
- return (long) (tmp >> WMULT_SHIFT);
- return (long) (tmp >> (WMULT_SHIFT - NICE_0_SHIFT));
}
static inline void
prev_runtime = se->wait_runtime;
__add_wait_runtime(cfs_rq, se, delta_fair);
- schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
delta_fair = se->wait_runtime - prev_runtime;
/*
if (tsk->state & TASK_UNINTERRUPTIBLE)
se->block_start = rq_of(cfs_rq)->clock;
}
- cfs_rq->wait_runtime -= se->wait_runtime;
#endif
}
__dequeue_entity(cfs_rq, se);
/*
* Preempt the current task with a newly woken task if needed:
*/
-static int
+static void
__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
struct sched_entity *curr, unsigned long granularity)
{
s64 __delta = curr->fair_key - se->fair_key;
+ unsigned long ideal_runtime, delta_exec;
+
+ /*
+ * ideal_runtime is compared against sum_exec_runtime, which is
+ * walltime, hence do not scale.
+ */
+ ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
+ (unsigned long)sysctl_sched_min_granularity);
+
+ /*
+ * If we executed more than what the latency constraint suggests,
+ * reduce the rescheduling granularity. This way the total latency
+ * of how much a task is not scheduled converges to
+ * sysctl_sched_latency:
+ */
+ delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+ if (delta_exec > ideal_runtime)
+ granularity = 0;
/*
* Take scheduling granularity into account - do not
* preempt the current task unless the best task has
* a larger than sched_granularity fairness advantage:
+ *
+ * scale granularity as key space is in fair_clock.
*/
- if (__delta > niced_granularity(curr, granularity)) {
+ if (__delta > niced_granularity(curr, granularity))
resched_task(rq_of(cfs_rq)->curr);
- return 1;
- }
- return 0;
}
static inline void
update_stats_wait_end(cfs_rq, se);
update_stats_curr_start(cfs_rq, se);
set_cfs_rq_curr(cfs_rq, se);
+ se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
- unsigned long gran, ideal_runtime, delta_exec;
struct sched_entity *next;
/*
if (next == curr)
return;
- gran = sched_granularity(cfs_rq);
- ideal_runtime = niced_granularity(curr,
- max(sysctl_sched_latency / cfs_rq->nr_running,
- (unsigned long)sysctl_sched_min_granularity));
- /*
- * If we executed more than what the latency constraint suggests,
- * reduce the rescheduling granularity. This way the total latency
- * of how much a task is not scheduled converges to
- * sysctl_sched_latency:
- */
- delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
- if (delta_exec > ideal_runtime)
- gran = 0;
-
- if (__check_preempt_curr_fair(cfs_rq, next, curr, gran))
- curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
+ __check_preempt_curr_fair(cfs_rq, next, curr,
+ sched_granularity(cfs_rq));
}
/**************************************************
* The statistical average of wait_runtime is about
* -granularity/2, so initialize the task with that:
*/
- if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
+ if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
- schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
- }
__enqueue_entity(cfs_rq, se);
}
struct dst_entry *dst = __sk_dst_get(sk);
u32 rto_min = TCP_RTO_MIN;
- if (dst_metric_locked(dst, RTAX_RTO_MIN))
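+ /* the socket may not have a cached route yet, so check dst before its metrics */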
+ if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
rto_min = dst->metrics[RTAX_RTO_MIN-1];
return rto_min;
}
}
}
-static void conf_askvalue(struct symbol *sym, const char *def)
+static int conf_askvalue(struct symbol *sym, const char *def)
{
enum symbol_type type = sym_get_type(sym);
tristate val;
printf("%s\n", def);
line[0] = '\n';
line[1] = 0;
- return;
+ return 0;
}
switch (input_mode) {
case set_random:
if (sym_has_value(sym)) {
printf("%s\n", def);
- return;
+ return 0;
}
break;
case ask_new:
case ask_silent:
if (sym_has_value(sym)) {
printf("%s\n", def);
- return;
+ return 0;
}
check_stdin();
case ask_all:
fflush(stdout);
fgets(line, 128, stdin);
- return;
+ return 1;
case set_default:
printf("%s\n", def);
- return;
+ return 1;
default:
break;
}
case S_HEX:
case S_STRING:
printf("%s\n", def);
- return;
+ return 1;
default:
;
}
break;
}
printf("%s", line);
+ return 1;
}
int conf_string(struct menu *menu)
def = sym_get_string_value(sym);
if (sym_get_string_value(sym))
printf("[%s] ", def);
- conf_askvalue(sym, def);
+ if (!conf_askvalue(sym, def))
+ return 0;
switch (line[0]) {
case '\n':
break;
if (menu_has_help(menu))
printf("/?");
printf("] ");
- conf_askvalue(sym, sym_get_string_value(sym));
+ if (!conf_askvalue(sym, sym_get_string_value(sym)))
+ return 0;
strip(line);
switch (line[0]) {