1.11.0 Fix table line argument order
(wrong raid10_copies/raid10_format sequence)
1.11.1 Add raid4/5/6 journal write-back support via journal_mode option
+1.12.1 fix for MD deadlock between mddev_suspend() and md_write_start() available
Optional properties:
- clocks: Reference to the crypto engine clock.
-- dma-mask: The address mask limitation. Defaults to 64.
Example:
interrupt-names = "mem", "ring0", "ring1", "ring2", "ring3",
"eip";
clocks = <&cpm_syscon0 1 26>;
- dma-mask = <0xff 0xffffffff>;
status = "disabled";
};
};
dwmmc0@12200000 {
- num-slots = <1>;
cap-mmc-highspeed;
cap-sd-highspeed;
broken-cd;
fifo-depth = <0x20>;
bus-width = <4>;
- num-slots = <1>;
disable-wp;
};
/* Board portion */
dwmmc0@fcd03000 {
- num-slots = <1>;
vmmc-supply = <&ldo12>;
fifo-depth = <0x100>;
pinctrl-names = "default";
dwmmc_1: dwmmc1@f723e000 {
compatible = "hisilicon,hi6220-dw-mshc";
- num-slots = <0x1>;
bus-width = <0x4>;
disable-wp;
cap-sd-highspeed;
* #address-cells: should be 1.
* #size-cells: should be 0.
-# Slots: The slot specific information are contained within child-nodes with
- each child-node representing a supported slot. There should be atleast one
- child node representing a card slot. The name of the child node representing
- the slot is recommended to be slot@n where n is the unique number of the slot
- connected to the controller. The following are optional properties which
- can be included in the slot child node.
+# Slots (DEPRECATED): The slot-specific information is contained within
+ child nodes, with each child node representing a supported slot. There should
+ be at least one child node representing a card slot. The name of the child
+ node representing the slot is recommended to be slot@n, where n is the unique
+ number of the slot connected to the controller. The following are optional
+ properties which can be included in the slot child node.
* reg: specifies the physical slot number. The valid values of this
property are 0 to (num-slots - 1), where num-slots is the value
clock(cclk_out). If it's not specified, max is 200MHz and min is 400kHz by default.
(Use the "max-frequency" instead of "clock-freq-min-max".)
-* num-slots: specifies the number of slots supported by the controller.
+* num-slots (DEPRECATED): specifies the number of slots supported by the controller.
The number of physical slots actually used could be equal to or less than the
value specified by num-slots. If this property is not specified, the value
of the num-slots property is assumed to be 1.
dwmmc0@12200000 {
clock-frequency = <400000000>;
clock-freq-min-max = <400000 200000000>;
- num-slots = <1>;
broken-cd;
fifo-depth = <0x80>;
card-detect-delay = <200>;
dwmmc0@12200000 {
clock-frequency = <400000000>;
clock-freq-min-max = <400000 200000000>;
- num-slots = <1>;
broken-cd;
fifo-depth = <0x80>;
card-detect-delay = <200>;
clock-frequency = <50000000>;
clocks = <&topcrm SD0_AHB>, <&topcrm SD0_WCLK>;
clock-names = "biu", "ciu";
- num-slots = <1>;
max-frequency = <50000000>;
cap-sdio-irq;
cap-sd-highspeed;
VERSION = 4
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Fearless Coyote
# *DOCUMENTATION*
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < count && user_count <= (count - off)) {
#ifndef __ASSEMBLY__
+#define ARCH_HAS_KIMAGE_ARCH
+struct kimage_arch {
+ u32 kernel_r2;
+};
+
/**
* crash_setup_regs() - save registers for the panic kernel
* @newregs: registers are saved here
* bytes, to prevent unpredictable padding in the signal frame.
*/
+/*
+ * Dummy padding block: if this magic is encountered, the block should
+ * be skipped using the corresponding size field.
+ */
+#define DUMMY_MAGIC 0xb0d9ed01
+
#ifdef CONFIG_CRUNCH
#define CRUNCH_MAGIC 0x5065cf03
#define CRUNCH_STORAGE_SIZE (CRUNCH_SIZE + 8)
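The magic/size header is what makes the dummy block skippable. As a rough
userspace-side sketch (block_hdr and find_block are hypothetical names, not
part of this patch), a reader walking the uc_regspace area can tolerate
DUMMY_MAGIC or any unknown magic like this:

#include <stddef.h>
#include <stdint.h>

struct block_hdr {
	uint32_t magic;
	uint32_t size;	/* total block size in bytes, header included */
};

/* Return the block with the wanted magic, skipping unknown ones. */
static const struct block_hdr *find_block(const char *aux, const char *end,
					  uint32_t wanted)
{
	while (aux + sizeof(struct block_hdr) <= end) {
		const struct block_hdr *h = (const struct block_hdr *)aux;

		if (h->magic == 0 || h->size < sizeof(*h))
			break;		/* terminator or corrupt block */
		if (h->magic == wanted)
			return h;
		aux += h->size;		/* skips DUMMY_MAGIC blocks too */
	}
	return NULL;
}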
static atomic_t waiting_for_crash_ipi;
-static unsigned long dt_mem;
/*
* Provide a dummy crash_notes definition while crash dump arrives to arm.
* This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
__be32 header;
int i, err;
+ image->arch.kernel_r2 = image->start - KEXEC_ARM_ZIMAGE_OFFSET
+ + KEXEC_ARM_ATAGS_OFFSET;
+
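For scale, with the conventional offsets from asm/kexec.h
(KEXEC_ARM_ZIMAGE_OFFSET is 0x8000, KEXEC_ARM_ATAGS_OFFSET is 0x1000), the
default r2 works out as below; the arithmetic is mine, not from the patch:

	/*
	 * kernel_r2 = start - 0x8000 + 0x1000 = start - 0x7000,
	 * i.e. the traditional ATAGs location just below the zImage
	 * load address; a DTB segment found later overrides it.
	 */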
/*
* Validate that if the current HW supports SMP, then the SW supports
* and implements CPU hotplug for the current HW. If not, we won't be
if (err)
return err;
- if (be32_to_cpu(header) == OF_DT_HEADER)
- dt_mem = current_segment->mem;
+ if (header == cpu_to_be32(OF_DT_HEADER))
+ image->arch.kernel_r2 = current_segment->mem;
}
return 0;
}
kexec_start_address = image->start;
kexec_indirection_page = page_list;
kexec_mach_type = machine_arch_type;
- kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET
- + KEXEC_ARM_ATAGS_OFFSET;
+ kexec_boot_atags = image->arch.kernel_r2;
/* copy our kernel relocation code to the control code page */
reboot_entry = fncpy(reboot_code_buffer,
if (crash_base <= 0) {
unsigned long long crash_max = idmap_to_phys((u32)~0);
+ unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
+ if (crash_max > lowmem_max)
+ crash_max = lowmem_max;
crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
crash_size, CRASH_ALIGN);
if (!crash_base) {
return __copy_to_user(frame, kframe, sizeof(*frame));
}
-static int restore_crunch_context(struct crunch_sigframe __user *frame)
+static int restore_crunch_context(char __user **auxp)
{
+ struct crunch_sigframe __user *frame =
+ (struct crunch_sigframe __user *)*auxp;
char kbuf[sizeof(*frame) + 8];
struct crunch_sigframe *kframe;
if (kframe->magic != CRUNCH_MAGIC ||
kframe->size != CRUNCH_STORAGE_SIZE)
return -1;
+ *auxp += CRUNCH_STORAGE_SIZE;
crunch_task_restore(current_thread_info(), &kframe->storage);
return 0;
}
#ifdef CONFIG_IWMMXT
-static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
+static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
char kbuf[sizeof(*frame) + 8];
struct iwmmxt_sigframe *kframe;
+ int err = 0;
/* the iWMMXt context must be 64 bit aligned */
kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
- kframe->magic = IWMMXT_MAGIC;
- kframe->size = IWMMXT_STORAGE_SIZE;
- iwmmxt_task_copy(current_thread_info(), &kframe->storage);
- return __copy_to_user(frame, kframe, sizeof(*frame));
+
+ if (test_thread_flag(TIF_USING_IWMMXT)) {
+ kframe->magic = IWMMXT_MAGIC;
+ kframe->size = IWMMXT_STORAGE_SIZE;
+ iwmmxt_task_copy(current_thread_info(), &kframe->storage);
+
+ err = __copy_to_user(frame, kframe, sizeof(*frame));
+ } else {
+ /*
+ * For bug-compatibility with older kernels, some space
+ * has to be reserved for iWMMXt even if it's not used.
+ * Set the magic and size appropriately so that properly
+ * written userspace can skip it reliably:
+ */
+ __put_user_error(DUMMY_MAGIC, &frame->magic, err);
+ __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
+ }
+
+ return err;
}
-static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
+static int restore_iwmmxt_context(char __user **auxp)
{
+ struct iwmmxt_sigframe __user *frame =
+ (struct iwmmxt_sigframe __user *)*auxp;
char kbuf[sizeof(*frame) + 8];
struct iwmmxt_sigframe *kframe;
kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
if (__copy_from_user(kframe, frame, sizeof(*frame)))
return -1;
- if (kframe->magic != IWMMXT_MAGIC ||
- kframe->size != IWMMXT_STORAGE_SIZE)
+
+ /*
+ * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
+ * block is discarded for compatibility with setup_sigframe() if
+ * present, but we don't mandate its presence. If some other
+ * magic is here, it's not for us:
+ */
+ if (!test_thread_flag(TIF_USING_IWMMXT) &&
+ kframe->magic != DUMMY_MAGIC)
+ return 0;
+
+ if (kframe->size != IWMMXT_STORAGE_SIZE)
return -1;
- iwmmxt_task_restore(current_thread_info(), &kframe->storage);
+
+ if (test_thread_flag(TIF_USING_IWMMXT)) {
+ if (kframe->magic != IWMMXT_MAGIC)
+ return -1;
+
+ iwmmxt_task_restore(current_thread_info(), &kframe->storage);
+ }
+
+ *auxp += IWMMXT_STORAGE_SIZE;
return 0;
}
return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}
-static int restore_vfp_context(struct vfp_sigframe __user *frame)
+static int restore_vfp_context(char __user **auxp)
{
+ struct vfp_sigframe __user *frame =
+ (struct vfp_sigframe __user *)*auxp;
unsigned long magic;
unsigned long size;
int err = 0;
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL;
+ *auxp += size;
return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
- struct aux_sigframe __user *aux;
+ char __user *aux;
sigset_t set;
int err;
err |= !valid_user_regs(regs);
- aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
+ aux = (char __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
if (err == 0)
- err |= restore_crunch_context(&aux->crunch);
+ err |= restore_crunch_context(&aux);
#endif
#ifdef CONFIG_IWMMXT
- if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
- err |= restore_iwmmxt_context(&aux->iwmmxt);
+ if (err == 0)
+ err |= restore_iwmmxt_context(&aux);
#endif
#ifdef CONFIG_VFP
if (err == 0)
- err |= restore_vfp_context(&aux->vfp);
+ err |= restore_vfp_context(&aux);
#endif
return err;
err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
- if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
+ if (err == 0)
err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
#define HSMMC_NAME_LEN 9
-static void omap_hsmmc1_before_set_reg(struct device *dev,
- int power_on, int vdd)
-{
- u32 reg, prog_io;
- struct omap_hsmmc_platform_data *mmc = dev->platform_data;
-
- if (mmc->remux)
- mmc->remux(dev, power_on);
-
- /*
- * Assume we power both OMAP VMMC1 (for CMD, CLK, DAT0..3) and the
- * card with Vcc regulator (from twl4030 or whatever). OMAP has both
- * 1.8V and 3.0V modes, controlled by the PBIAS register.
- *
- * In 8-bit modes, OMAP VMMC1A (for DAT4..7) needs a supply, which
- * is most naturally TWL VSIM; those pins also use PBIAS.
- *
- * FIXME handle VMMC1A as needed ...
- */
- if (power_on) {
- if (cpu_is_omap2430()) {
- reg = omap_ctrl_readl(OMAP243X_CONTROL_DEVCONF1);
- if ((1 << vdd) >= MMC_VDD_30_31)
- reg |= OMAP243X_MMC1_ACTIVE_OVERWRITE;
- else
- reg &= ~OMAP243X_MMC1_ACTIVE_OVERWRITE;
- omap_ctrl_writel(reg, OMAP243X_CONTROL_DEVCONF1);
- }
-
- if (mmc->internal_clock) {
- reg = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
- reg |= OMAP2_MMCSDIO1ADPCLKISEL;
- omap_ctrl_writel(reg, OMAP2_CONTROL_DEVCONF0);
- }
-
- reg = omap_ctrl_readl(control_pbias_offset);
- if (cpu_is_omap3630()) {
- /* Set MMC I/O to 52MHz */
- prog_io = omap_ctrl_readl(OMAP343X_CONTROL_PROG_IO1);
- prog_io |= OMAP3630_PRG_SDMMC1_SPEEDCTRL;
- omap_ctrl_writel(prog_io, OMAP343X_CONTROL_PROG_IO1);
- } else {
- reg |= OMAP2_PBIASSPEEDCTRL0;
- }
- reg &= ~OMAP2_PBIASLITEPWRDNZ0;
- omap_ctrl_writel(reg, control_pbias_offset);
- } else {
- reg = omap_ctrl_readl(control_pbias_offset);
- reg &= ~OMAP2_PBIASLITEPWRDNZ0;
- omap_ctrl_writel(reg, control_pbias_offset);
- }
-}
-
-static void omap_hsmmc1_after_set_reg(struct device *dev, int power_on, int vdd)
-{
- u32 reg;
-
- /* 100ms delay required for PBIAS configuration */
- msleep(100);
-
- if (power_on) {
- reg = omap_ctrl_readl(control_pbias_offset);
- reg |= (OMAP2_PBIASLITEPWRDNZ0 | OMAP2_PBIASSPEEDCTRL0);
- if ((1 << vdd) <= MMC_VDD_165_195)
- reg &= ~OMAP2_PBIASLITEVMODE0;
- else
- reg |= OMAP2_PBIASLITEVMODE0;
- omap_ctrl_writel(reg, control_pbias_offset);
- } else {
- reg = omap_ctrl_readl(control_pbias_offset);
- reg |= (OMAP2_PBIASSPEEDCTRL0 | OMAP2_PBIASLITEPWRDNZ0 |
- OMAP2_PBIASLITEVMODE0);
- omap_ctrl_writel(reg, control_pbias_offset);
- }
-}
-
-static void hsmmc2_select_input_clk_src(struct omap_hsmmc_platform_data *mmc)
-{
- u32 reg;
-
- reg = omap_ctrl_readl(control_devconf1_offset);
- if (mmc->internal_clock)
- reg |= OMAP2_MMCSDIO2ADPCLKISEL;
- else
- reg &= ~OMAP2_MMCSDIO2ADPCLKISEL;
- omap_ctrl_writel(reg, control_devconf1_offset);
-}
-
-static void hsmmc2_before_set_reg(struct device *dev, int power_on, int vdd)
-{
- struct omap_hsmmc_platform_data *mmc = dev->platform_data;
-
- if (mmc->remux)
- mmc->remux(dev, power_on);
-
- if (power_on)
- hsmmc2_select_input_clk_src(mmc);
-}
-
-static int am35x_hsmmc2_set_power(struct device *dev, int power_on, int vdd)
-{
- struct omap_hsmmc_platform_data *mmc = dev->platform_data;
-
- if (power_on)
- hsmmc2_select_input_clk_src(mmc);
-
- return 0;
-}
-
-static int nop_mmc_set_power(struct device *dev, int power_on, int vdd)
-{
- return 0;
-}
-
static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
struct omap_hsmmc_platform_data *mmc)
{
return -ENOMEM;
}
- if (c->name)
- strncpy(hc_name, c->name, HSMMC_NAME_LEN);
- else
- snprintf(hc_name, (HSMMC_NAME_LEN + 1), "mmc%islot%i",
- c->mmc, 1);
+ snprintf(hc_name, (HSMMC_NAME_LEN + 1), "mmc%islot%i", c->mmc, 1);
mmc->name = hc_name;
mmc->caps = c->caps;
- mmc->internal_clock = !c->ext_clock;
mmc->reg_offset = 0;
- if (c->cover_only) {
- /* detect if mobile phone cover removed */
- mmc->gpio_cd = -EINVAL;
- mmc->gpio_cod = c->gpio_cd;
- } else {
- /* card detect pin on the mmc socket itself */
- mmc->gpio_cd = c->gpio_cd;
- mmc->gpio_cod = -EINVAL;
- }
- mmc->gpio_wp = c->gpio_wp;
-
- mmc->remux = c->remux;
- mmc->init_card = c->init_card;
-
- if (c->nonremovable)
- mmc->nonremovable = 1;
-
- /*
- * NOTE: MMC slots should have a Vcc regulator set up.
- * This may be from a TWL4030-family chip, another
- * controllable regulator, or a fixed supply.
- *
- * temporary HACK: ocr_mask instead of fixed supply
- */
- if (soc_is_am35xx())
- mmc->ocr_mask = MMC_VDD_165_195 |
- MMC_VDD_26_27 |
- MMC_VDD_27_28 |
- MMC_VDD_29_30 |
- MMC_VDD_30_31 |
- MMC_VDD_31_32;
- else
- mmc->ocr_mask = c->ocr_mask;
-
- if (!soc_is_am35xx())
- mmc->features |= HSMMC_HAS_PBIAS;
-
- switch (c->mmc) {
- case 1:
- if (mmc->features & HSMMC_HAS_PBIAS) {
- /* on-chip level shifting via PBIAS0/PBIAS1 */
- mmc->before_set_reg =
- omap_hsmmc1_before_set_reg;
- mmc->after_set_reg =
- omap_hsmmc1_after_set_reg;
- }
-
- if (soc_is_am35xx())
- mmc->set_power = nop_mmc_set_power;
-
- /* OMAP3630 HSMMC1 supports only 4-bit */
- if (cpu_is_omap3630() &&
- (c->caps & MMC_CAP_8_BIT_DATA)) {
- c->caps &= ~MMC_CAP_8_BIT_DATA;
- c->caps |= MMC_CAP_4_BIT_DATA;
- mmc->caps = c->caps;
- }
- break;
- case 2:
- if (soc_is_am35xx())
- mmc->set_power = am35x_hsmmc2_set_power;
-
- if (c->ext_clock)
- c->transceiver = 1;
- if (c->transceiver && (c->caps & MMC_CAP_8_BIT_DATA)) {
- c->caps &= ~MMC_CAP_8_BIT_DATA;
- c->caps |= MMC_CAP_4_BIT_DATA;
- }
- if (mmc->features & HSMMC_HAS_PBIAS) {
- /* off-chip level shifting, or none */
- mmc->before_set_reg = hsmmc2_before_set_reg;
- mmc->after_set_reg = NULL;
- }
- break;
- case 3:
- case 4:
- case 5:
- mmc->before_set_reg = NULL;
- mmc->after_set_reg = NULL;
- break;
- default:
- pr_err("MMC%d configuration not supported!\n", c->mmc);
- kfree(hc_name);
- return -ENODEV;
- }
return 0;
}
void omap_hsmmc_late_init(struct omap2_hsmmc_info *c)
{
struct platform_device *pdev;
- struct omap_hsmmc_platform_data *mmc_pdata;
int res;
if (omap_hsmmc_done != 1)
return;
omap_hsmmc_done++;
for (; c->mmc; c++) {
- if (!c->deferred)
- continue;
-
pdev = c->pdev;
if (!pdev)
continue;
-
- mmc_pdata = pdev->dev.platform_data;
- if (!mmc_pdata)
- continue;
-
- if (c->cover_only) {
- /* detect if mobile phone cover removed */
- mmc_pdata->gpio_cd = -EINVAL;
- mmc_pdata->gpio_cod = c->gpio_cd;
- } else {
- /* card detect pin on the mmc socket itself */
- mmc_pdata->gpio_cd = c->gpio_cd;
- mmc_pdata->gpio_cod = -EINVAL;
- }
- mmc_pdata->gpio_wp = c->gpio_wp;
-
res = omap_device_register(pdev);
if (res)
- pr_err("Could not late init MMC %s\n",
- c->name);
+ pr_err("Could not late init MMC\n");
}
}
if (oh->dev_attr != NULL) {
mmc_dev_attr = oh->dev_attr;
mmc_data->controller_flags = mmc_dev_attr->flags;
- /*
- * erratum 2.1.1.128 doesn't apply if board has
- * a transceiver is attached
- */
- if (hsmmcinfo->transceiver)
- mmc_data->controller_flags &=
- ~OMAP_HSMMC_BROKEN_MULTIBLOCK_READ;
}
pdev = platform_device_alloc(name, ctrl_nr - 1);
hsmmcinfo->pdev = pdev;
- if (hsmmcinfo->deferred)
- goto free_mmc;
-
res = omap_device_register(pdev);
if (res) {
pr_err("Could not register od for %s\n", name);
u8 mmc; /* controller 1/2/3 */
u32 caps; /* 4/8 wires and any additional host
* capabilities OR'd (ref. linux/mmc/host.h) */
- bool transceiver; /* MMC-2 option */
- bool ext_clock; /* use external pin for input clock */
- bool cover_only; /* No card detect - just cover switch */
- bool nonremovable; /* Nonremovable e.g. eMMC */
- bool deferred; /* mmc needs a deferred probe */
int gpio_cd; /* or -EINVAL */
int gpio_wp; /* or -EINVAL */
- char *name; /* or NULL for default */
struct platform_device *pdev; /* mmc controller instance */
- int ocr_mask; /* temporary HACK */
- /* Remux (pad configuration) when powering on/off */
- void (*remux)(struct device *dev, int power_on);
/* init some special card */
void (*init_card)(struct mmc_card *card);
};
static struct arm_pmu_platdata db8500_pmu_platdata = {
.handle_irq = db8500_pmu_handler,
+ .irq_flags = IRQF_NOBALANCING | IRQF_NO_THREAD,
};
static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
{
const struct dma_map_ops *ops = &dma_noop_ops;
+ void *ret;
/*
- * We are here because:
+ * Try generic allocator first if we are advertised that
+ * consistency is not required.
+ */
+
+ if (attrs & DMA_ATTR_NON_CONSISTENT)
+ return ops->alloc(dev, size, dma_handle, gfp, attrs);
+
+ ret = dma_alloc_from_global_coherent(size, dma_handle);
+
+ /*
+ * dma_alloc_from_global_coherent() may fail because:
+ *
* - no consistent DMA region has been defined, so we can't
* continue.
* - there is no space left in consistent DMA region, so we
*   can only fall back to the generic allocator if we are
*   advertised that consistency is not required.
*/
- if (attrs & DMA_ATTR_NON_CONSISTENT)
- return ops->alloc(dev, size, dma_handle, gfp, attrs);
-
- WARN_ON_ONCE(1);
- return NULL;
+ WARN_ON_ONCE(ret == NULL);
+ return ret;
}
static void arm_nommu_dma_free(struct device *dev, size_t size,
{
const struct dma_map_ops *ops = &dma_noop_ops;
- if (attrs & DMA_ATTR_NON_CONSISTENT)
+ if (attrs & DMA_ATTR_NON_CONSISTENT) {
ops->free(dev, size, cpu_addr, dma_addr, attrs);
- else
- WARN_ON_ONCE(1);
+ } else {
+ int ret = dma_release_from_global_coherent(get_order(size),
+ cpu_addr);
+
+ WARN_ON_ONCE(ret == 0);
+ }
return;
}
+static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ int ret;
+
+ if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
+ return ret;
+
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
const struct dma_map_ops arm_nommu_dma_ops = {
.alloc = arm_nommu_dma_alloc,
.free = arm_nommu_dma_free,
+ .mmap = arm_nommu_dma_mmap,
.map_page = arm_nommu_dma_map_page,
.unmap_page = arm_nommu_dma_unmap_page,
.map_sg = arm_nommu_dma_map_sg,
unsigned long pfn = dma_to_pfn(dev, dma_addr);
unsigned long off = vma->vm_pgoff;
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
" sub x30, x30, %[ret]\n"
" cbnz x30, 1b\n"
"2:")
- : [ret] "+&r" (x0), [v] "+Q" (v->counter)
+ : [ret] "+r" (x0), [v] "+Q" (v->counter)
:
: __LL_SC_CLOBBERS, "cc", "memory");
* the "%x0" template means XZR.
*/
#define write_sysreg(v, r) do { \
- u64 __val = (u64)v; \
+ u64 __val = (u64)(v); \
asm volatile("msr " __stringify(r) ", %x0" \
: : "rZ" (__val)); \
} while (0)
})
#define write_sysreg_s(v, r) do { \
- u64 __val = (u64)v; \
+ u64 __val = (u64)(v); \
asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
} while (0)
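The added parentheses matter whenever the value argument is an expression,
since a cast binds tighter than most operators. A minimal sketch of the hazard
(old_cast/new_cast are made-up names and the shift example is illustrative,
not from the patch):

typedef unsigned long long u64;

u64 old_cast(int x) { return (u64)x >> 1; }	/* widens x first: logical shift */
u64 new_cast(int x) { return (u64)(x >> 1); }	/* shifts first, then widens */

/* old_cast(-4) == 0x7ffffffffffffffe, new_cast(-4) == 0xfffffffffffffffe */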
*/
#define __range_ok(addr, size) \
({ \
- unsigned long __addr = (unsigned long __force)(addr); \
+ unsigned long __addr = (unsigned long)(addr); \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
* Don't warn spuriously.
*/
if (cpu != 0)
- pr_err("%s: missing enable-method property\n",
- dn->full_name);
+ pr_err("%pOF: missing enable-method property\n",
+ dn);
}
} else {
enable_method = acpi_get_enable_method(cpu);
*/
cell = of_get_property(dn, "reg", NULL);
if (!cell) {
- pr_err("%s: missing reg property\n", dn->full_name);
+ pr_err("%pOF: missing reg property\n", dn);
return INVALID_HWID;
}
* Non affinity bits must be set to 0 in the DT
*/
if (hwid & ~MPIDR_HWID_BITMASK) {
- pr_err("%s: invalid reg property\n", dn->full_name);
+ pr_err("%pOF: invalid reg property\n", dn);
return INVALID_HWID;
}
return hwid;
goto next;
if (is_mpidr_duplicate(cpu_count, hwid)) {
- pr_err("%s: duplicate cpu reg properties in the DT\n",
- dn->full_name);
+ pr_err("%pOF: duplicate cpu reg properties in the DT\n",
+ dn);
goto next;
}
*/
if (hwid == cpu_logical_map(0)) {
if (bootcpu_valid) {
- pr_err("%s: duplicate boot cpu reg property in DT\n",
- dn->full_name);
+ pr_err("%pOF: duplicate boot cpu reg property in DT\n",
+ dn);
goto next;
}
}
}
- pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+ pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
of_node_put(cpu_node);
return -1;
cpu_topology[cpu].core_id = core_id;
cpu_topology[cpu].thread_id = i;
} else {
- pr_err("%s: Can't get CPU for thread\n",
- t->full_name);
+ pr_err("%pOF: Can't get CPU for thread\n",
+ t);
of_node_put(t);
return -EINVAL;
}
cpu = get_cpu_for_node(core);
if (cpu >= 0) {
if (!leaf) {
- pr_err("%s: Core has both threads and CPU\n",
- core->full_name);
+ pr_err("%pOF: Core has both threads and CPU\n",
+ core);
return -EINVAL;
}
cpu_topology[cpu].cluster_id = cluster_id;
cpu_topology[cpu].core_id = core_id;
} else if (leaf) {
- pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+ pr_err("%pOF: Can't get CPU for leaf core\n", core);
return -EINVAL;
}
has_cores = true;
if (depth == 0) {
- pr_err("%s: cpu-map children should be clusters\n",
- c->full_name);
+ pr_err("%pOF: cpu-map children should be clusters\n",
+ c);
of_node_put(c);
return -EINVAL;
}
if (leaf) {
ret = parse_core(c, cluster_id, core_id++);
} else {
- pr_err("%s: Non-leaf cluster with core %s\n",
- cluster->full_name, name);
+ pr_err("%pOF: Non-leaf cluster with core %s\n",
+ cluster, name);
ret = -EINVAL;
}
} while (c);
if (leaf && !has_cores)
- pr_warn("%s: empty cluster\n", cluster->full_name);
+ pr_warn("%pOF: empty cluster\n", cluster);
if (leaf)
cluster_id++;
void die(const char *str, struct pt_regs *regs, int err)
{
int ret;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&die_lock, flags);
oops_enter();
- raw_spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
ret = __die(str, err, regs);
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- raw_spin_unlock_irq(&die_lock);
oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
+
+ raw_spin_unlock_irqrestore(&die_lock, flags);
+
if (ret != NOTIFY_STOP)
do_exit(SIGSEGV);
}
*/
ENTRY(copy_page)
alternative_if ARM64_HAS_NO_HW_PREFETCH
- # Prefetch two cache lines ahead.
- prfm pldl1strm, [x1, #128]
- prfm pldl1strm, [x1, #256]
+ // Prefetch three cache lines ahead.
+ prfm pldl1strm, [x1, #128]
+ prfm pldl1strm, [x1, #256]
+ prfm pldl1strm, [x1, #384]
alternative_else_nop_endif
ldp x2, x3, [x1]
subs x18, x18, #128
alternative_if ARM64_HAS_NO_HW_PREFETCH
- prfm pldl1strm, [x1, #384]
+ prfm pldl1strm, [x1, #384]
alternative_else_nop_endif
stnp x2, x3, [x0]
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
is_device_dma_coherent(dev));
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
return __swiotlb_mmap_pfn(vma, pfn, size);
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
is_device_dma_coherent(dev));
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
pgprot_t prot, struct vm_struct *vma,
- int flags)
+ int flags, unsigned long vm_flags)
{
phys_addr_t pa_start = __pa_symbol(va_start);
unsigned long size = va_end - va_start;
__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
early_pgtable_alloc, flags);
+ if (!(vm_flags & VM_NO_GUARD))
+ size += PAGE_SIZE;
+
vma->addr = va_start;
vma->phys_addr = pa_start;
vma->size = size;
- vma->flags = VM_MAP;
+ vma->flags = VM_MAP | vm_flags;
vma->caller = __builtin_return_address(0);
vm_area_add_early(vma);
* Only rodata will be remapped with different permissions later on,
* all other segments are allowed to use contiguous mappings.
*/
- map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0);
+ map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
+ VM_NO_GUARD);
map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
- &vmlinux_rodata, NO_CONT_MAPPINGS);
+ &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
- &vmlinux_inittext, 0);
+ &vmlinux_inittext, 0, VM_NO_GUARD);
map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
- &vmlinux_initdata, 0);
- map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0);
+ &vmlinux_initdata, 0, VM_NO_GUARD);
+ map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
/*
}
node_set(nid, numa_nodes_parsed);
- pr_info("Adding memblock [0x%llx - 0x%llx] on node %d\n",
- start, (end - 1), nid);
return ret;
}
void *nd;
int tnid;
- if (start_pfn < end_pfn)
- pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
- start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
- else
+ if (start_pfn >= end_pfn)
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
else
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < count && user_count <= (count - off)) {
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_PA7100LC=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_GSC_LASI=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
-CONFIG_IP_NF_QUEUE=m
CONFIG_LLC2=m
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=m
CONFIG_LASI_82596=y
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
# CONFIG_KEYBOARD_HIL_OLD is not set
CONFIG_MOUSE_SERIAL=m
+CONFIG_LEGACY_PTY_COUNT=64
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=17
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_MUX is not set
CONFIG_PDC_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=64
CONFIG_PRINTER=m
CONFIG_PPDEV=m
# CONFIG_HW_RANDOM is not set
CONFIG_RAW_DRIVER=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_DUMMY_CONSOLE_COLUMNS=128
CONFIG_DUMMY_CONSOLE_ROWS=48
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_HARMONY=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_AUTOFS4_FS=y
CONFIG_TMPFS=y
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_LIBCRC32C=m
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_PA8X00=y
CONFIG_64BIT=y
CONFIG_SMP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
-# CONFIG_INET_LRO is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_RAW=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_ISCSI_ATTRS=m
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_QLOGIC_1280=m
CONFIG_MD_RAID1=y
CONFIG_FUSION=y
CONFIG_FUSION_SPI=m
-CONFIG_FUSION_FC=m
CONFIG_FUSION_CTL=m
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_VENDOR_3COM=y
+CONFIG_PCMCIA_3C574=m
+CONFIG_PCMCIA_3C589=m
CONFIG_VORTEX=m
CONFIG_TYPHOON=m
+CONFIG_ACENIC=m
+CONFIG_ACENIC_OMIT_TIGON_I=y
+CONFIG_PCNET32=m
+CONFIG_TIGON3=m
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=y
CONFIG_TULIP_MMIO=y
CONFIG_PCMCIA_XIRCOM=m
CONFIG_HP100=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=m
CONFIG_E100=m
-CONFIG_ACENIC=m
-CONFIG_ACENIC_OMIT_TIGON_I=y
CONFIG_E1000=m
-CONFIG_TIGON3=m
-CONFIG_NET_PCMCIA=y
-CONFIG_PCMCIA_3C589=m
-CONFIG_PCMCIA_3C574=m
CONFIG_PCMCIA_SMC91C92=m
CONFIG_PCMCIA_XIRC2PS=m
CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_CS=m
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_PDC_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_RAW_DRIVER=y
# CONFIG_HWMON is not set
# CONFIG_STI_CONSOLE is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_AUTOFS4_FS=y
CONFIG_TMPFS=y
CONFIG_UFS_FS=m
CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFSD=m
CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_HEADERS_CHECK=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_BLOWFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_LIBCRC32C=m
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MD_RAID0=y
CONFIG_MD_RAID1=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_LASI_82596=y
CONFIG_NET_TULIP=y
CONFIG_TULIP=y
+CONFIG_LASI_82596=y
CONFIG_PPP=y
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_HIL_OLD is not set
CONFIG_PRINTER=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_HARMONY=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_HEADERS_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SECURITY=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_PA8X00=y
CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_GSC is not set
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
CONFIG_NETFILTER_DEBUG=y
-CONFIG_IP_NF_QUEUE=m
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_NS87415=y
-CONFIG_PATA_SIL680=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_ISCSI_ATTRS=m
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_FUSION_SPI=m
CONFIG_FUSION_CTL=m
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
+CONFIG_ACENIC=m
+CONFIG_TIGON3=m
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=y
CONFIG_TULIP_MMIO=y
-CONFIG_NET_PCI=y
CONFIG_E100=m
-CONFIG_ACENIC=m
CONFIG_E1000=m
-CONFIG_TIGON3=m
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPPOE=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1600
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1200
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_MOUSE_PS2 is not set
CONFIG_SERIO=m
# CONFIG_HW_RANDOM is not set
CONFIG_RAW_DRIVER=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_AD1889=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_LEGOTOWER=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_XFS_FS=m
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_HEADERS_CHECK=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_MD5=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_DES=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_LIBCRC32C=m
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_RD_LZO=y
CONFIG_EXPERT=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_SLAB=y
CONFIG_64BIT=y
CONFIG_SMP=y
CONFIG_PREEMPT=y
-# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_IOMMU_CCIO=y
CONFIG_PCI=y
CONFIG_PCI_LBA=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
# CONFIG_FB_STI is not set
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_LCD_CLASS_DEVICE is not set
# CONFIG_BACKLIGHT_GENERIC is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_SOUND=m
CONFIG_SND=m
+CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_AD1889=m
# CONFIG_SND_USB is not set
# CONFIG_SND_GSC is not set
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT4_FS=m
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_PROC_INFO=y
CONFIG_XFS_FS=m
CONFIG_DEBUG_SLAB_LEAK=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_PROVE_RCU_DELAY=y
CONFIG_DEBUG_BLOCK_EXT_DEVT=y
CONFIG_LATENCYTOP=y
CONFIG_KEYS=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
-CONFIG_IPV6=y
CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_MD_RAID10=y
CONFIG_BLK_DEV_DM=y
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=m
-CONFIG_LASI_82596=y
-CONFIG_NET_TULIP=y
-CONFIG_TULIP=y
-CONFIG_NET_PCI=y
CONFIG_ACENIC=y
CONFIG_TIGON3=y
-CONFIG_NET_PCMCIA=y
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+CONFIG_LASI_82596=y
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
# CONFIG_KEYBOARD_HIL_OLD is not set
CONFIG_MOUSE_SERIAL=y
+CONFIG_LEGACY_PTY_COUNT=64
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_CS=y
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_LEGACY_PTY_COUNT=64
CONFIG_PRINTER=m
CONFIG_PPDEV=m
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_DUMMY_CONSOLE_COLUMNS=128
CONFIG_DUMMY_CONSOLE_ROWS=48
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x16=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_SOUND=y
CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SEQUENCER=y
CONFIG_SND_AD1889=y
CONFIG_SND_HARMONY=y
CONFIG_HID_GYRATION=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_TOPSEED=y
CONFIG_USB=y
CONFIG_USB_UHCI_HCD=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_HEADERS_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_KEYS=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_LIBCRC32C=m
+CONFIG_FONTS=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_RD_LZO=y
CONFIG_EXPERT=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_PERF_EVENTS=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_LLC2=m
# CONFIG_WIRELESS is not set
CONFIG_PPDEV=m
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
-CONFIG_POWER_SUPPLY=y
# CONFIG_HWMON is not set
CONFIG_AGP=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FB_FOREIGN_ENDIAN=y
CONFIG_FB_MODE_HELPERS=y
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_SOUND=m
CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SEQUENCER=m
CONFIG_SND_AD1889=m
CONFIG_SND_HARMONY=m
CONFIG_HIDRAW=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_XFS_FS=m
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_RT=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_RCU_CPU_STALL_INFO=y
CONFIG_LATENCYTOP=y
CONFIG_LKDTM=m
CONFIG_KEYS=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=y
CONFIG_FONTS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_PID_NS is not set
-# CONFIG_NET_NS is not set
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CPUSETS=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_LRO=m
CONFIG_INET_DIAG=m
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_ATA_GENERIC=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_RAID=m
CONFIG_DM_UEVENT=y
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MDIO_BITBANG=m
CONFIG_PHYLIB=y
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
CONFIG_BROADCOM_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
CONFIG_ICPLUS_PHY=m
-CONFIG_REALTEK_PHY=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
CONFIG_NATIONAL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
CONFIG_STE10XP=m
-CONFIG_LSI_ET1011C_PHY=m
-CONFIG_MDIO_BITBANG=m
+CONFIG_VITESSE_PHY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SERIO_SERPORT=m
# CONFIG_HP_SDC is not set
CONFIG_SERIO_RAW=m
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_NOZOMI=m
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_AGP_PARISC=y
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
-CONFIG_DRM_RADEON_UMS=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MODE_HELPERS=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_BACKLIGHT_GENERIC is not set
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_BTRFS_FS=m
CONFIG_QUOTA=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
# CONFIG_SCHED_DEBUG is not set
-CONFIG_TIMER_STATS=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=m
CONFIG_LIBCRC32C=y
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
#define PDC_PAT_MEM_SETGM 9L /* Set Good Memory value */
#define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */
#define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From */
- /* Memory Address */
+ /* Memory Address */
#define PDC_PAT_MEM_GET_TXT_SIZE 12L /* Get Formatted Text Size */
#define PDC_PAT_MEM_GET_PD_TXT 13L /* Get PD Formatted Text */
#define PDC_PAT_MEM_GET_CELL_TXT 14L /* Get Cell Formatted Text */
unsigned long pdt_entries;
};
+struct pdc_pat_mem_phys_mem_location { /* PDC_PAT_MEM/PDC_PAT_MEM_ADDRESS */
+ u64 cabinet:8;
+ u64 ign1:8;
+ u64 ign2:8;
+ u64 cell_slot:8;
+ u64 ign3:8;
+ u64 dimm_slot:8; /* DIMM slot, e.g. 0x1A, 0x2B, show user hex value! */
+ u64 ign4:8;
+ u64 source:4; /* for mem: always 0x07 */
+ u64 source_detail:4; /* for mem: always 0x04 (SIMM or DIMM) */
+};
struct pdc_pat_pd_addr_map_entry {
unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */
extern int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
unsigned long *pdt_entries_ptr, unsigned long count,
unsigned long offset);
+extern int pdc_pat_mem_get_dimm_phys_location(
+ struct pdc_pat_mem_phys_mem_location *pret,
+ unsigned long phys_addr);
#endif /* __ASSEMBLY__ */
before it can be accessed through the kernel mapping. */
preempt_disable();
flush_dcache_page_asm(__pa(vfrom), vaddr);
- preempt_enable();
copy_page_asm(vto, vfrom);
+ preempt_enable();
}
EXPORT_SYMBOL(copy_user_page);
struct vm_area_struct *vma;
pgd_t *pgd;
+ /* Flush the TLB to avoid speculation if coherency is required. */
+ if (parisc_requires_coherency())
+ flush_tlb_all();
+
/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- unsigned long addr;
- pgd_t *pgd;
-
BUG_ON(!vma->vm_mm->context);
+ /* Flush the TLB to avoid speculation if coherency is required. */
+ if (parisc_requires_coherency())
+ flush_tlb_range(vma, start, end);
+
if ((end - start) >= parisc_cache_flush_threshold) {
flush_cache_all();
return;
}
- if (vma->vm_mm->context == mfsp(3)) {
- flush_user_dcache_range_asm(start, end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(start, end);
- return;
- }
+ BUG_ON(vma->vm_mm->context != mfsp(3));
- pgd = vma->vm_mm->pgd;
- for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
- unsigned long pfn;
- pte_t *ptep = get_ptep(pgd, addr);
- if (!ptep)
- continue;
- pfn = pte_pfn(*ptep);
- if (pfn_valid(pfn))
- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
- }
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
}
void
BUG_ON(!vma->vm_mm->context);
if (pfn_valid(pfn)) {
- flush_tlb_page(vma, vmaddr);
+ if (parisc_requires_coherency())
+ flush_tlb_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
unsigned long offset)
{
int retval;
- unsigned long flags;
+ unsigned long flags, entries;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_PD_READ,
- __pa(&pret), __pa(pdt_entries_ptr),
+ __pa(&pdc_result), __pa(pdt_entries_ptr),
count, offset);
+
+ if (retval == PDC_OK) {
+ entries = min(pdc_result[0], count);
+ pret->actual_count_bytes = entries;
+ pret->pdt_entries = entries / sizeof(unsigned long);
+ }
+
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_mem_get_dimm_phys_location - Get physical DIMM slot via PAT firmware
+ * @pret: ptr to hold returned information
+ * @phys_addr: physical address to examine
+ *
+ */
+int pdc_pat_mem_get_dimm_phys_location(
+ struct pdc_pat_mem_phys_mem_location *pret,
+ unsigned long phys_addr)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_ADDRESS,
+ __pa(&pdc_result), phys_addr);
+
+ if (retval == PDC_OK)
+ memcpy(pret, &pdc_result, sizeof(*pret));
+
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
if (regs->sr[7])
return;
+ /* exit if already in panic */
+ if (sysctl_panic_on_stackoverflow < 0)
+ return;
+
/* calculate kernel stack usage */
stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
- if (sysctl_panic_on_stackoverflow)
+ if (sysctl_panic_on_stackoverflow) {
+ sysctl_panic_on_stackoverflow = -1; /* disable further checks */
panic("low stack detected by irq handler - check messages\n");
+ }
#endif
}
#ifdef CONFIG_64BIT
struct pdc_pat_mem_read_pd_retinfo pat_pret;
+ /* try old obsolete PAT firmware function first */
+ pdt_type = PDT_PAT_OLD;
ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry,
MAX_PDT_ENTRIES);
if (ret != PDC_OK) {
- pdt_type = PDT_PAT_OLD;
+ pdt_type = PDT_PAT_NEW;
ret = pdc_pat_mem_read_pd_pdt(&pat_pret, pdt_entry,
MAX_PDT_TABLE_SIZE, 0);
}
}
for (i = 0; i < pdt_status.pdt_entries; i++) {
- if (i < 20)
- pr_warn("PDT: BAD PAGE #%d at 0x%08lx (error_type = %lu)\n",
- i,
- pdt_entry[i] & PAGE_MASK,
- pdt_entry[i] & 1);
+ struct pdc_pat_mem_phys_mem_location loc;
+
+ /* get DIMM slot number */
+ loc.dimm_slot = 0xff;
+#ifdef CONFIG_64BIT
+ pdc_pat_mem_get_dimm_phys_location(&loc, pdt_entry[i]);
+#endif
+
+ pr_warn("PDT: BAD PAGE #%d at 0x%08lx, "
+ "DIMM slot %02x (error_type = %lu)\n",
+ i,
+ pdt_entry[i] & PAGE_MASK,
+ loc.dimm_slot,
+ pdt_entry[i] & 1);
/* mark memory page bad */
memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE);
#include <linux/uaccess.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
+#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/asm-offsets.h>
/* prevent soft lockup/stalled CPU messages for endless loop. */
rcu_sysrq_start();
+ lockup_detector_suspend();
for (;;);
}
*(.text.sys_exit)
*(.text.do_sigaltstack)
*(.text.do_fork)
+ *(.text.div)
+ *($$*) /* millicode routines */
*(.text.*)
*(.fixup)
*(.lock.text) /* out-of-line lock text */
machine-$(CONFIG_CPU_LITTLE_ENDIAN) += le
UTS_MACHINE := $(subst $(space),,$(machine-y))
+# XXX This needs to be before we override LD below
+ifdef CONFIG_PPC32
+KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
+else
+ifeq ($(call ld-ifversion, -ge, 225000000, y),y)
+# Have the linker provide sfpr if possible (binutils >= 2.25;
+# ld-ifversion encodes version 2.25 as 225000000).
+# There is a corresponding test in arch/powerpc/lib/Makefile
+KBUILD_LDFLAGS_MODULE += --save-restore-funcs
+else
+KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
+endif
+endif
+
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
override LD += -EL
LDEMULATION := lppc
CHECKFLAGS += -D__LITTLE_ENDIAN__
endif
-ifdef CONFIG_PPC32
-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
-else
-ifeq ($(call ld-ifversion, -ge, 225000000, y),y)
-# Have the linker provide sfpr if possible.
-# There is a corresponding test in arch/powerpc/lib/Makefile
-KBUILD_LDFLAGS_MODULE += --save-restore-funcs
-else
-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
-endif
-endif
-
ifeq ($(CONFIG_476FPE_ERR46),y)
KBUILD_LDFLAGS_MODULE += --ppc476-workaround \
-T $(srctree)/arch/powerpc/platforms/44x/ppc476_modules.lds
#define PRTS_MASK 0x1f /* process table size field */
#define PRTB_MASK 0x0ffffffffffff000UL
-/*
- * Limit process table to PAGE_SIZE table. This
- * also limit the max pid we can support.
- * MAX_USER_CONTEXT * 16 bytes of space.
- */
-#define PRTB_SIZE_SHIFT (CONTEXT_BITS + 4)
-#define PRTB_ENTRIES (1ul << CONTEXT_BITS)
+/* Number of supported PID bits */
+extern unsigned int mmu_pid_bits;
+
+/* Base PID to allocate from */
+extern unsigned int mmu_base_pid;
+
+#define PRTB_SIZE_SHIFT (mmu_pid_bits + 4)
+#define PRTB_ENTRIES (1ul << mmu_pid_bits)
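To put numbers on this, assuming the defaults chosen later in this series
(20 PID bits on the hypervisor, 19 in a guest) and 16 bytes per process-table
entry, which is where the "+ 4" comes from:

	/*
	 * mmu_pid_bits = 20:
	 *	PRTB_ENTRIES    = 1ul << 20     (1M process-table entries)
	 *	PRTB_SIZE_SHIFT = 20 + 4 = 24   (a 16 MB process table)
	 *
	 * The BUG_ON(PRTB_SIZE_SHIFT > 36) added below therefore allows
	 * at most 32 PID bits, i.e. a 64 GB process table.
	 */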
/*
* Power9 currently only support 64K partition table size.
#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
- struct mm_struct *next);
+ struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
extern void mmu_context_init(void);
#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
+extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
+#else
+static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
+#endif
+
extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
struct mm_struct *next,
struct task_struct *tsk)
{
+ bool new_on_cpu = false;
+
/* Mark this context has been used on the new CPU */
- if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
+ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+ new_on_cpu = true;
+ }
/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */
+
+ if (new_on_cpu)
+ radix_kvm_prefetch_workaround(next);
+
/*
* The actual HW switching method differs between the various
* sub architectures. Out of line for now
goto out;
}
- if (kvm->arch.hpt.virt)
+ if (kvm->arch.hpt.virt) {
kvmppc_free_hpt(&kvm->arch.hpt);
+ kvmppc_rmap_reset(kvm);
+ }
err = kvmppc_allocate_hpt(&info, order);
if (err < 0)
run->fail_entry.hardware_entry_failure_reason = 0;
return -EINVAL;
}
+ /* Enable TM so we can read the TM SPRs */
+ mtmsr(mfmsr() | MSR_TM);
current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
current->thread.tm_texasr = mfspr(SPRN_TEXASR);
ori r6,r6,1
mtspr SPRN_CTRLT,r6
4:
- /* Read the guest SLB and save it away */
+ /* Check if we are running hash or radix and store it in cr2 */
ld r5, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r5)
- cmpwi r0, 0
+ cmpwi cr2,r0,0
+
+ /* Read the guest SLB and save it away */
li r5, 0
- bne 3f /* for radix, save 0 entries */
+ bne cr2, 3f /* for radix, save 0 entries */
lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
mtctr r0
li r6,0
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
- /* Clear out SLB */
- li r5,0
- slbmte r5,r5
- slbia
- ptesync
/* Restore host values of some registers */
BEGIN_FTR_SECTION
mtspr SPRN_PID, r7
mtspr SPRN_IAMR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
+#ifdef CONFIG_PPC_RADIX_MMU
+ /*
+ * Are we running hash or radix?
+ */
+ beq cr2,3f
+
+ /* Radix: Handle the case where the guest used an illegal PID */
+ LOAD_REG_ADDR(r4, mmu_base_pid)
+ lwz r3, VCPU_GUEST_PID(r9)
+ lwz r5, 0(r4)
+ cmpw cr0,r3,r5
+ blt 2f
+
+ /*
+ * Illegal PID, the HW might have prefetched and cached in the TLB
+ * some translations for the LPID 0 / guest PID combination which
+ * Linux doesn't know about, so we need to flush that PID out of
+ * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
+ * the right context.
+ */
+ li r0,0
+ mtspr SPRN_LPID,r0
+ isync
+
+ /* Then do a congruence class local flush */
+ ld r6,VCPU_KVM(r9)
+ lwz r0,KVM_TLB_SETS(r6)
+ mtctr r0
+ li r7,0x400 /* IS field = 0b01 */
+ ptesync
+ sldi r0,r3,32 /* RS has PID */
+1: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
+ addi r7,r7,0x1000
+ bdnz 1b
+ ptesync
+
+2: /* Flush the ERAT on radix P9 DD1 guest exit */
BEGIN_FTR_SECTION
PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
+ b 4f
+#endif /* CONFIG_PPC_RADIX_MMU */
+ /* Hash: clear out SLB */
+3: li r5,0
+ slbmte r5,r5
+ slbia
+ ptesync
+4:
/*
* POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
static int radix__init_new_context(struct mm_struct *mm)
{
unsigned long rts_field;
- int index;
+ int index, max_id;
- index = alloc_context_id(1, PRTB_ENTRIES - 1);
+ max_id = (1 << mmu_pid_bits) - 1;
+ index = alloc_context_id(mmu_base_pid, max_id);
if (index < 0)
return index;
#include <trace/events/thp.h>
+unsigned int mmu_pid_bits;
+unsigned int mmu_base_pid;
+
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
unsigned long table_size)
{
for_each_memblock(memory, reg)
WARN_ON(create_physical_mapping(reg->base,
reg->base + reg->size));
+
+ /* Find out how many PID bits are supported */
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (!mmu_pid_bits)
+ mmu_pid_bits = 20;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /*
+ * When KVM is possible, we only use the top half of the
+ * PID space to avoid collisions between host and guest PIDs
+ * which can cause problems due to prefetch when exiting the
+ * guest with AIL=3
+ */
+ mmu_base_pid = 1 << (mmu_pid_bits - 1);
+#else
+ mmu_base_pid = 1;
+#endif
+ } else {
+ /* The guest uses the bottom half of the PID space */
+ if (!mmu_pid_bits)
+ mmu_pid_bits = 19;
+ mmu_base_pid = 1;
+ }
+
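The resulting split is easy to see in plain C (a sketch using the 20-/19-bit defaults from the hunk above; illustration only, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int host_pid_bits = 20;	/* hypervisor default above */
	unsigned int guest_pid_bits = 19;	/* guest default above */

	/* The host hands out PIDs from the top half of its space... */
	unsigned int host_base = 1u << (host_pid_bits - 1);
	/* ...while a guest allocates from the bottom half of its own. */
	unsigned int guest_base = 1;

	printf("host PIDs : %u..%u\n", host_base, (1u << host_pid_bits) - 1);
	printf("guest PIDs: %u..%u\n", guest_base, (1u << guest_pid_bits) - 1);
	return 0;
}

With these defaults the host allocates PIDs 524288..1048575 and a guest 1..524287, so the two ranges can never collide.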
/*
* Allocate Partition table and process table for the
* host.
*/
- BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large.");
+ BUG_ON(PRTB_SIZE_SHIFT > 36);
process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
/*
* Fill in the process table.
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
+ /* Find MMU PID size */
+ prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
+ if (prop && size == 4)
+ mmu_pid_bits = be32_to_cpup(prop);
+
+ /* Grab page size encodings */
prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
if (!prop)
return 0;
}
}
addr = 0;
- for (i = 0; i < 2; ++i) {
+ for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
p = spt->protptrs[i];
if (!p)
continue;
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
-#include <asm/ppc-opcode.h>
+#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
-
+#include <asm/cputhreads.h>
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
else
radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
+{
+ unsigned int pid = mm->context.id;
+
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ return;
+
+ /*
+ * If this context hasn't run on that CPU before and KVM is
+ * around, there's a slim chance that the guest on another
+ * CPU just brought in obsolete translation into the TLB of
+ * this CPU due to a bad prefetch using the guest PID on
+ * the way into the hypervisor.
+ *
+ * We work around this here. If KVM is possible, we check if
+ * any sibling thread is in KVM. If it is, the window may exist
+ * and thus we flush that PID from the core.
+ *
+ * A potential future improvement would be to mark which PIDs
+ * have never been used on the system and avoid it if the PID
+ * is new and the process has no other cpumask bit set.
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
+ int cpu = smp_processor_id();
+ int sib = cpu_first_thread_sibling(cpu);
+ bool flush = false;
+
+ for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
+ if (sib == cpu)
+ continue;
+ if (paca[sib].kvm_hstate.kvm_vcpu)
+ flush = true;
+ }
+ if (flush)
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
+ }
+}
+EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
of_detach_node(np);
of_node_put(parent);
- of_node_put(np); /* Must decrement the refcount */
return 0;
}
psw_bits(regs.psw).ia = sfr->basic.ia;
psw_bits(regs.psw).dat = sfr->basic.T;
psw_bits(regs.psw).wait = sfr->basic.W;
- psw_bits(regs.psw).per = sfr->basic.P;
+ psw_bits(regs.psw).pstate = sfr->basic.P;
psw_bits(regs.psw).as = sfr->basic.AS;
/*
{
uint8_t *keys;
uint64_t hva;
- int i, r = 0;
+ int srcu_idx, i, r = 0;
if (args->flags != 0)
return -EINVAL;
return -ENOMEM;
down_read(&current->mm->mmap_sem);
+ srcu_idx = srcu_read_lock(&kvm->srcu);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
if (r)
break;
}
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
up_read(&current->mm->mmap_sem);
if (!r) {
{
uint8_t *keys;
uint64_t hva;
- int i, r = 0;
+ int srcu_idx, i, r = 0;
if (args->flags != 0)
return -EINVAL;
goto out;
down_read(&current->mm->mmap_sem);
+ srcu_idx = srcu_read_lock(&kvm->srcu);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
if (r)
break;
}
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
up_read(&current->mm->mmap_sem);
out:
kvfree(keys);
unsigned long ptev;
pgste_t pgste;
- /* Clear storage key */
+ /* Clear storage key ACC and F, but set R/C */
preempt_disable();
pgste = pgste_get_lock(ptep);
- pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
- PGSTE_GR_BIT | PGSTE_GC_BIT);
+ pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
+ pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
ptev = pte_val(*ptep);
if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
KBUILD_CFLAGS += -mno-mmx -mno-sse
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
#include "ctype.h"
#include "string.h"
+/*
+ * Undef these macros so that the functions that we provide
+ * here will have the correct names regardless of how string.h
+ * may have chosen to #define them.
+ */
+#undef memcpy
+#undef memset
+#undef memcmp
+
int memcmp(const void *s1, const void *s2, size_t len)
{
bool diff;
#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi
+apicinterrupt3 POSTED_INTR_NESTED_VECTOR kvm_posted_intr_nested_ipi smp_kvm_posted_intr_nested_ipi
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
#define SKX_UPI_PCI_PMON_CTL0 0x350
#define SKX_UPI_PCI_PMON_CTR0 0x318
#define SKX_UPI_PCI_PMON_BOX_CTL 0x378
-#define SKX_PMON_CTL_UMASK_EXT 0xff
+#define SKX_UPI_CTL_UMASK_EXT 0xffefff
/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0 0x228
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
-DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-39");
+DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
-DEFINE_UNCORE_FORMAT_ATTR(filter_link4, filter_link, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
&format_attr_inv.attr,
&format_attr_thresh8.attr,
&format_attr_filter_tid4.attr,
- &format_attr_filter_link4.attr,
&format_attr_filter_state5.attr,
&format_attr_filter_rem.attr,
&format_attr_filter_loc.attr,
&format_attr_filter_opc_0.attr,
&format_attr_filter_opc_1.attr,
&format_attr_filter_nc.attr,
- &format_attr_filter_c6.attr,
&format_attr_filter_isoc.attr,
NULL,
};
SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x8134, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
+ EVENT_EXTRA_END
};
static u64 skx_cha_filter_mask(int fields)
mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
if (fields & 0x4)
mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
+ if (fields & 0x8) {
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
+ }
return mask;
}
.format_group = &skx_uncore_format_group,
};
+static struct attribute *skx_uncore_pcu_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ &format_attr_occ_invert.attr,
+ &format_attr_occ_edge_det.attr,
+ &format_attr_filter_band0.attr,
+ &format_attr_filter_band1.attr,
+ &format_attr_filter_band2.attr,
+ &format_attr_filter_band3.attr,
+ NULL,
+};
+
+static struct attribute_group skx_uncore_pcu_format_group = {
+ .name = "format",
+ .attrs = skx_uncore_pcu_formats_attr,
+};
+
static struct intel_uncore_ops skx_uncore_pcu_ops = {
IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
.hw_config = hswep_pcu_hw_config,
.box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
.num_shared_regs = 1,
.ops = &skx_uncore_pcu_ops,
- .format_group = &snbep_uncore_pcu_format_group,
+ .format_group = &skx_uncore_pcu_format_group,
};
static struct intel_uncore_type *skx_msr_uncores[] = {
.perf_ctr_bits = 48,
.perf_ctr = SKX_UPI_PCI_PMON_CTR0,
.event_ctl = SKX_UPI_PCI_PMON_CTL0,
- .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
- .event_mask_ext = SKX_PMON_CTL_UMASK_EXT,
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+ .event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
.box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
.ops = &skx_upi_uncore_pci_ops,
.format_group = &skx_upi_uncore_format_group,
smp_kvm_posted_intr_ipi)
BUILD_INTERRUPT3(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR,
smp_kvm_posted_intr_wakeup_ipi)
+BUILD_INTERRUPT3(kvm_posted_intr_nested_ipi, POSTED_INTR_NESTED_VECTOR,
+ smp_kvm_posted_intr_nested_ipi)
#endif
/*
#ifdef CONFIG_HAVE_KVM
unsigned int kvm_posted_intr_ipis;
unsigned int kvm_posted_intr_wakeup_ipis;
+ unsigned int kvm_posted_intr_nested_ipis;
#endif
unsigned int x86_platform_ipis; /* arch dependent */
unsigned int apic_perf_irqs;
extern asmlinkage void x86_platform_ipi(void);
extern asmlinkage void kvm_posted_intr_ipi(void);
extern asmlinkage void kvm_posted_intr_wakeup_ipi(void);
+extern asmlinkage void kvm_posted_intr_nested_ipi(void);
extern asmlinkage void error_interrupt(void);
extern asmlinkage void irq_work_interrupt(void);
#define trace_reboot_interrupt reboot_interrupt
#define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi
#define trace_kvm_posted_intr_wakeup_ipi kvm_posted_intr_wakeup_ipi
+#define trace_kvm_posted_intr_nested_ipi kvm_posted_intr_nested_ipi
#endif /* CONFIG_TRACING */
#ifdef CONFIG_X86_LOCAL_APIC
*/
#define X86_PLATFORM_IPI_VECTOR 0xf7
-#define POSTED_INTR_WAKEUP_VECTOR 0xf1
/*
* IRQ work vector:
*/
/* Vector for KVM to deliver posted interrupt IPI */
#ifdef CONFIG_HAVE_KVM
#define POSTED_INTR_VECTOR 0xf2
+#define POSTED_INTR_WAKEUP_VECTOR 0xf1
+#define POSTED_INTR_NESTED_VECTOR 0xf0
#endif
/*
* This file is licensed under GPLv2.
*/
-#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/smp.h>
struct aperfmperf_sample {
unsigned int khz;
- unsigned long jiffies;
+ ktime_t time;
u64 aperf;
u64 mperf;
};
static DEFINE_PER_CPU(struct aperfmperf_sample, samples);
+#define APERFMPERF_CACHE_THRESHOLD_MS 10
+#define APERFMPERF_REFRESH_DELAY_MS 20
+#define APERFMPERF_STALE_THRESHOLD_MS 1000
+
/*
* aperfmperf_snapshot_khz()
* On the current CPU, snapshot APERF, MPERF, and the current time
u64 aperf, aperf_delta;
u64 mperf, mperf_delta;
struct aperfmperf_sample *s = this_cpu_ptr(&samples);
+ ktime_t now = ktime_get();
+ s64 time_delta = ktime_ms_delta(now, s->time);
- /* Don't bother re-computing within 10 ms */
- if (time_before(jiffies, s->jiffies + HZ/100))
+ /* Don't bother re-computing within the cache threshold time. */
+ if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
return;
rdmsrl(MSR_IA32_APERF, aperf);
if (mperf_delta == 0)
return;
- /*
- * if (cpu_khz * aperf_delta) fits into ULLONG_MAX, then
- * khz = (cpu_khz * aperf_delta) / mperf_delta
- */
- if (div64_u64(ULLONG_MAX, cpu_khz) > aperf_delta)
- s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
- else /* khz = aperf_delta / (mperf_delta / cpu_khz) */
- s->khz = div64_u64(aperf_delta,
- div64_u64(mperf_delta, cpu_khz));
- s->jiffies = jiffies;
+ s->time = now;
s->aperf = aperf;
s->mperf = mperf;
+
+ /* If the previous iteration was too long ago, discard it. */
+ if (time_delta > APERFMPERF_STALE_THRESHOLD_MS)
+ s->khz = 0;
+ else
+ s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
}
unsigned int arch_freq_get_on_cpu(int cpu)
{
+ unsigned int khz;
+
if (!cpu_khz)
return 0;
return 0;
smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
+ khz = per_cpu(samples.khz, cpu);
+ if (khz)
+ return khz;
+
+ msleep(APERFMPERF_REFRESH_DELAY_MS);
+ smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
return per_cpu(samples.khz, cpu);
}
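The computation being cached above is worth stating on its own (a standalone sketch of the khz formula, not kernel code; the sample numbers are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Effective kHz over a sampling window, mirroring
 * s->khz = div64_u64(cpu_khz * aperf_delta, mperf_delta). */
static uint64_t effective_khz(uint64_t cpu_khz, uint64_t aperf_delta,
			      uint64_t mperf_delta)
{
	return mperf_delta ? cpu_khz * aperf_delta / mperf_delta : 0;
}

int main(void)
{
	/* A CPU with a 2.4 GHz base clock that averaged 75% of base
	 * speed over the window reports 1.8 GHz. */
	printf("%llu kHz\n",
	       (unsigned long long)effective_khz(2400000, 3, 4));
	return 0;
}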
seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
seq_puts(p, " Posted-interrupt notification event\n");
+ seq_printf(p, "%*s: ", prec, "NPI");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ",
+ irq_stats(j)->kvm_posted_intr_nested_ipis);
+ seq_puts(p, " Nested posted-interrupt event\n");
+
seq_printf(p, "%*s: ", prec, "PIW");
for_each_online_cpu(j)
seq_printf(p, "%10u ",
exiting_irq();
set_irq_regs(old_regs);
}
+
+/*
+ * Handler for POSTED_INTR_NESTED_VECTOR.
+ */
+__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ entering_ack_irq();
+ inc_irq_stat(kvm_posted_intr_nested_ipis);
+ exiting_irq();
+ set_irq_regs(old_regs);
+}
#endif
__visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs)
alloc_intr_gate(POSTED_INTR_VECTOR, kvm_posted_intr_ipi);
/* IPI for KVM to deliver interrupt to wake up tasks */
alloc_intr_gate(POSTED_INTR_WAKEUP_VECTOR, kvm_posted_intr_wakeup_ipi);
+ /* IPI for KVM to deliver nested posted interrupt */
+ alloc_intr_gate(POSTED_INTR_NESTED_VECTOR, kvm_posted_intr_nested_ipi);
#endif
/* IPI vectors for APIC spurious and error interrupts */
int arch_prepare_kprobe(struct kprobe *p)
{
+ int ret;
+
if (alternatives_text_reserved(p->addr, p->addr))
return -EINVAL;
if (!p->ainsn.insn)
return -ENOMEM;
- return arch_copy_kprobe(p);
+ ret = arch_copy_kprobe(p);
+ if (ret) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
+
+ return ret;
}
void arch_arm_kprobe(struct kprobe *p)
/*
* The DMI quirks table takes precedence. If no quirks entry
- * matches and the ACPI Hardware Reduced bit is set, force EFI
- * reboot.
+ * matches and the ACPI Hardware Reduced bit is set and EFI
+ * runtime services are enabled, force EFI reboot.
*/
rv = dmi_check_system(reboot_dmi_table);
- if (!rv && efi_reboot_required())
+ if (!rv && efi_reboot_required() && !efi_runtime_disabled())
reboot_type = BOOT_EFI;
return 0;
static void cancel_hv_timer(struct kvm_lapic *apic)
{
+ WARN_ON(preemptible());
WARN_ON(!apic->lapic_timer.hv_timer_in_use);
- preempt_disable();
kvm_x86_ops->cancel_hv_timer(apic->vcpu);
apic->lapic_timer.hv_timer_in_use = false;
- preempt_enable();
}
static bool start_hv_timer(struct kvm_lapic *apic)
struct kvm_timer *ktimer = &apic->lapic_timer;
int r;
+ WARN_ON(preemptible());
if (!kvm_x86_ops->set_hv_timer)
return false;
static void start_sw_timer(struct kvm_lapic *apic)
{
struct kvm_timer *ktimer = &apic->lapic_timer;
+
+ WARN_ON(preemptible());
if (apic->lapic_timer.hv_timer_in_use)
cancel_hv_timer(apic);
if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
static void restart_apic_timer(struct kvm_lapic *apic)
{
+ preempt_disable();
if (!start_hv_timer(apic))
start_sw_timer(apic);
+ preempt_enable();
}
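(Taken together these hunks establish one invariant: start_hv_timer(), start_sw_timer() and cancel_hv_timer() now always run with preemption disabled, which the WARN_ON(preemptible()) checks assert, while outer entry points such as restart_apic_timer() and kvm_lapic_switch_to_sw_timer() bracket the whole operation in preempt_disable()/preempt_enable().)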
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
- WARN_ON(!apic->lapic_timer.hv_timer_in_use);
+ preempt_disable();
+ /* If the preempt notifier has already run, it also called apic_timer_expired */
+ if (!apic->lapic_timer.hv_timer_in_use)
+ goto out;
WARN_ON(swait_active(&vcpu->wq));
cancel_hv_timer(apic);
apic_timer_expired(apic);
advance_periodic_target_expiration(apic);
restart_apic_timer(apic);
}
+out:
+ preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
{
struct kvm_lapic *apic = vcpu->arch.apic;
+ preempt_disable();
/* Possibly the TSC deadline timer is not enabled yet */
if (apic->lapic_timer.hv_timer_in_use)
start_sw_timer(apic);
+ preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
struct kvm_vcpu vcpu;
unsigned long host_rsp;
u8 fail;
- bool nmi_known_unmasked;
u32 exit_intr_info;
u32 idt_vectoring_info;
ulong rflags;
}
}
-static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
+ bool nested)
{
#ifdef CONFIG_SMP
+ int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;
+
if (vcpu->mode == IN_GUEST_MODE) {
struct vcpu_vmx *vmx = to_vmx(vcpu);
*/
WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
- apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
- POSTED_INTR_VECTOR);
+ apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
return true;
}
#endif
if (is_guest_mode(vcpu) &&
vector == vmx->nested.posted_intr_nv) {
/* the PIR and ON have been set by L1. */
- kvm_vcpu_trigger_posted_interrupt(vcpu);
+ kvm_vcpu_trigger_posted_interrupt(vcpu, true);
/*
* If a posted intr is not recognized by hardware,
* we will accomplish it in the next vmentry.
if (pi_test_and_set_on(&vmx->pi_desc))
return;
- if (!kvm_vcpu_trigger_posted_interrupt(vcpu))
+ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
kvm_vcpu_kick(vcpu);
}
vmcs12->vm_entry_instruction_len);
vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
vmcs12->guest_interruptibility_info);
+ vmx->loaded_vmcs->nmi_known_unmasked =
+ !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
} else {
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
}
/* Posted interrupts setting is only taken from vmcs12. */
if (nested_cpu_has_posted_intr(vmcs12)) {
- /*
- * Note that we use L0's vector here and in
- * vmx_deliver_nested_posted_interrupt.
- */
vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
vmx->nested.pi_pending = false;
- vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
+ vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
} else {
exec_control &= ~PIN_BASED_POSTED_INTR;
}
*/
vmx_flush_tlb(vcpu);
}
-
+ /* Restore posted intr vector. */
+ if (nested_cpu_has_posted_intr(vmcs12))
+ vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
(unsigned long *)&vcpu->arch.regs_avail))
return true;
- gfn = (kvm_read_cr3(vcpu) & ~31ul) >> PAGE_SHIFT;
- offset = (kvm_read_cr3(vcpu) & ~31ul) & (PAGE_SIZE - 1);
+ gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
+ offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
PFERR_USER_MASK | PFERR_WRITE_MASK);
if (r < 0)
*/
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
+ /* no support for RQF_PM and ->rpm_status in blk-mq yet */
+ if (q->mq_ops)
+ return;
+
q->dev = dev;
q->rpm_status = RPM_ACTIVE;
pm_runtime_set_autosuspend_delay(q->dev, -1);
static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
{
/*
- * Non online CPU will be mapped to queue index 0.
+ * A non-present CPU will be mapped to queue index 0.
*/
- if (!cpu_online(cpu))
+ if (!cpu_present(cpu))
return 0;
return cpu % nr_queues;
}
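A minimal illustration of the resulting map (plain C; the 4-queue device and six present CPUs are made-up parameters, not anything from the patch):

#include <stdio.h>

/* Same rule as cpu_to_queue_index() above, with presence passed
 * in by the caller instead of queried via cpu_present(). */
static int queue_index(int nr_queues, int cpu, int present)
{
	return present ? cpu % nr_queues : 0;
}

int main(void)
{
	for (int cpu = 0; cpu < 8; cpu++)	/* CPUs 6 and 7 absent */
		printf("cpu %d -> queue %d\n", cpu,
		       queue_index(4, cpu, cpu < 6));
	return 0;
}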
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
u32 tmp[2];
+ if (!authsize)
+ goto decrypt;
+
/* Move high-order bits of sequence number back. */
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
if (crypto_memneq(ihash, ohash, authsize))
return -EBADMSG;
+decrypt:
+
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
+static bool ec_no_wakeup __read_mostly;
+module_param(ec_no_wakeup, bool, 0644);
+MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
+
struct acpi_ec_query_handler {
struct list_head node;
acpi_ec_query_func func;
spin_unlock_irqrestore(&ec->lock, flags);
__acpi_ec_flush_event(ec);
}
+
+void acpi_ec_flush_work(void)
+{
+ if (first_ec)
+ __acpi_ec_flush_event(first_ec);
+
+ flush_scheduled_work();
+}
#endif /* CONFIG_PM_SLEEP */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
return 0;
}
+static int acpi_ec_suspend_noirq(struct device *dev)
+{
+ struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+ /*
+ * The SCI handler doesn't run at this point, so the GPE can be
+ * masked at the low level without side effects.
+ */
+ if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
+
+ return 0;
+}
+
+static int acpi_ec_resume_noirq(struct device *dev)
+{
+ struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+ if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
+
+ return 0;
+}
+
static int acpi_ec_resume(struct device *dev)
{
struct acpi_ec *ec =
#endif
static const struct dev_pm_ops acpi_ec_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
void *data);
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
+#ifdef CONFIG_PM_SLEEP
+void acpi_ec_flush_work(void);
+#endif
+
/*--------------------------------------------------------------------------
Suspend/Resume
* So go over all cpu entries in SRAT to get apicid to node mapping.
*/
- /* SRAT: Static Resource Affinity Table */
+ /* SRAT: System Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
struct acpi_subtable_proc srat_proc[3];
/*
* Process all pending events in case there are any wakeup ones.
*
- * The EC driver uses the system workqueue, so that one needs to be
- * flushed too.
+ * The EC driver uses the system workqueue and an additional special
+ * one, so those need to be flushed too.
*/
+ acpi_ec_flush_work();
acpi_os_wait_events_complete();
- flush_scheduled_work();
s2idle_wakeup = false;
}
{
if (dev && dev->dma_mem)
return dev->dma_mem;
- return dma_coherent_default_memory;
+ return NULL;
}
static inline dma_addr_t dma_get_device_base(struct device *dev,
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-/**
- * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
- *
- * @dev: device from which we allocate memory
- * @size: size of requested memory area
- * @dma_handle: This will be filled with the correct dma handle
- * @ret: This pointer will be filled with the virtual address
- * to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret)
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+ ssize_t size, dma_addr_t *dma_handle)
{
- struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
int order = get_order(size);
unsigned long flags;
int pageno;
int dma_memory_map;
+ void *ret;
- if (!mem)
- return 0;
-
- *ret = NULL;
spin_lock_irqsave(&mem->spinlock, flags);
if (unlikely(size > (mem->size << PAGE_SHIFT)))
goto err;
/*
- * Memory was found in the per-device area.
+ * Memory was found in the coherent area.
*/
- *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
- *ret = mem->virt_base + (pageno << PAGE_SHIFT);
+ *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+ ret = mem->virt_base + (pageno << PAGE_SHIFT);
dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
spin_unlock_irqrestore(&mem->spinlock, flags);
if (dma_memory_map)
- memset(*ret, 0, size);
+ memset(ret, 0, size);
else
- memset_io(*ret, 0, size);
+ memset_io(ret, 0, size);
- return 1;
+ return ret;
err:
spin_unlock_irqrestore(&mem->spinlock, flags);
+ return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev: device from which we allocate memory
+ * @size: size of requested memory area
+ * @dma_handle: This will be filled with the correct dma handle
+ * @ret: This pointer will be filled with the virtual address
+ * to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret)
+{
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+ if (!mem)
+ return 0;
+
+ *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+ if (*ret)
+ return 1;
+
/*
* In the case where the allocation can not be satisfied from the
* per-device area, try to fall back to generic memory if the
*/
return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
-EXPORT_SYMBOL(dma_alloc_from_coherent);
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
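A caller-side sketch of the 0/1 contract documented above (illustrative only; arch_dma_alloc() and generic_alloc() are hypothetical stand-ins for a per-arch allocator, not functions from this patch):

static void *arch_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle)
{
	void *vaddr;

	/* A non-zero return means the request was handled here: either
	 * vaddr is a pool allocation, or it is NULL because an exhausted
	 * DMA_MEMORY_EXCLUSIVE pool forbids falling back.  Zero means
	 * continue with the generic allocator. */
	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
		return vaddr;

	return generic_alloc(dev, size, dma_handle);	/* hypothetical */
}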
-/**
- * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
- * @dev: device from which the memory was allocated
- * @order: the order of pages allocated
- * @vaddr: virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if
- * dma_release_coherent() should proceed with releasing memory from
- * generic pools.
- */
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
- struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+ if (!dma_coherent_default_memory)
+ return NULL;
+
+ return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+ dma_handle);
+}
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+ int order, void *vaddr)
+{
if (mem && vaddr >= mem->virt_base && vaddr <
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
}
return 0;
}
-EXPORT_SYMBOL(dma_release_from_coherent);
/**
- * dma_mmap_from_coherent() - try to mmap the memory allocated from
- * per-device coherent memory pool to userspace
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
* @dev: device from which the memory was allocated
- * @vma: vm_area for the userspace memory
- * @vaddr: cpu address returned by dma_alloc_from_coherent
- * @size: size of the memory buffer allocated by dma_alloc_from_coherent
- * @ret: result from remap_pfn_range()
+ * @order: the order of pages allocated
+ * @vaddr: virtual address of allocated pages
*
* This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
+ * coherent memory pool and if so, releases that memory.
*
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
*/
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
- void *vaddr, size_t size, int *ret)
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+ return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+ if (!dma_coherent_default_memory)
+ return 0;
+
+ return __dma_release_from_coherent(dma_coherent_default_memory, order,
+ vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+ struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
if (mem && vaddr >= mem->virt_base && vaddr + size <=
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
unsigned long off = vma->vm_pgoff;
}
return 0;
}
-EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev: device from which the memory was allocated
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
+ * @size: size of the memory buffer allocated
+ * @ret: result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *vaddr, size_t size, int *ret)
+{
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+ return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+ size_t size, int *ret)
+{
+ if (!dma_coherent_default_memory)
+ return 0;
+
+ return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+ vaddr, size, ret);
+}
/*
* Support for reserved memory regions defined in device tree
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < count && user_count <= (count - off)) {
continue;
}
sk_set_memalloc(sock->sk);
- sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+ if (nbd->tag_set.timeout)
+ sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
old = nsock->sock;
mutex_unlock(&nsock->tx_lock);
sockfd_put(old);
+ clear_bit(NBD_DISCONNECTED, &config->runtime_flags);
+
/* We take the tx_mutex in an error path in the recv_work, so we
* need to queue_work outside of the tx_mutex.
*/
int i, ret;
for (i = 0; i < config->num_connections; i++) {
+ struct nbd_sock *nsock = config->socks[i];
+
iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+ mutex_lock(&nsock->tx_lock);
ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
if (ret <= 0)
dev_err(disk_to_dev(nbd->disk),
"Send disconnect failed %d\n", ret);
+ mutex_unlock(&nsock->tx_lock);
}
}
struct nbd_config *config = nbd->config;
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
- if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
- &config->runtime_flags))
- send_disconnects(nbd);
+ set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
+ send_disconnects(nbd);
return 0;
}
return -ENOMEM;
}
sk_set_memalloc(config->socks[i]->sock->sk);
- config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+ if (nbd->tag_set.timeout)
+ config->socks[i]->sock->sk->sk_sndtimeo =
+ nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
INIT_WORK(&args->work, recv_work);
int i;
BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
- for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
- if (sysfs_streq(buf, virtblk_cache_types[i]))
- break;
-
+ i = sysfs_match_string(virtblk_cache_types, buf);
if (i < 0)
- return -EINVAL;
+ return i;
virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
virtblk_update_cache_mode(vdev);
};
struct blkif_req {
- int error;
+ blk_status_t error;
};
static inline struct blkif_req *blkif_req(struct request *rq)
* existing persistent grants, or if we have to get new grants,
* as there are not sufficiently many free.
*/
+ bool new_persistent_gnts = false;
struct scatterlist *sg;
int num_sg, max_grefs, num_grant;
*/
max_grefs += INDIRECT_GREFS(max_grefs);
- /*
- * We have to reserve 'max_grefs' grants because persistent
- * grants are shared by all rings.
- */
- if (max_grefs > 0)
- if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
+ /* Check if we have enough persistent grants to allocate a request */
+ if (rinfo->persistent_gnts_c < max_grefs) {
+ new_persistent_gnts = true;
+
+ if (gnttab_alloc_grant_references(
+ max_grefs - rinfo->persistent_gnts_c,
+ &setup.gref_head) < 0) {
gnttab_request_free_callback(
&rinfo->callback,
blkif_restart_queue_callback,
rinfo,
- max_grefs);
+ max_grefs - rinfo->persistent_gnts_c);
return 1;
}
+ }
/* Fill out a communications ring structure. */
id = blkif_ring_get_request(rinfo, req, &ring_req);
if (unlikely(require_extra_req))
rinfo->shadow[extra_id].req = *extra_ring_req;
- if (max_grefs > 0)
+ if (new_persistent_gnts)
gnttab_free_grant_references(setup.gref_head);
return 0;
return BLK_STS_IOERR;
out_busy:
- spin_unlock_irqrestore(&rinfo->ring_lock, flags);
blk_mq_stop_hw_queue(hctx);
+ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_RESOURCE;
}
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- blkif_req(req)->error = -EOPNOTSUPP;
+ blkif_req(req)->error = BLK_STS_NOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
return 0;
}
-static unsigned int intel_pstate_get(unsigned int cpu_num)
-{
- struct cpudata *cpu = all_cpu_data[cpu_num];
-
- return cpu ? get_avg_frequency(cpu) : 0;
-}
-
static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
struct cpudata *cpu = all_cpu_data[cpu_num];
.setpolicy = intel_pstate_set_policy,
.suspend = intel_pstate_hwp_save_state,
.resume = intel_pstate_resume,
- .get = intel_pstate_get,
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
.stop_cpu = intel_pstate_stop_cpu,
config CRYPTO_DEV_BCM_SPU
tristate "Broadcom symmetric crypto/hash acceleration support"
depends on ARCH_BCM_IPROC
- depends on BCM_PDC_MBOX
+ depends on MAILBOX
default m
select CRYPTO_DES
select CRYPTO_MD5
break;
case HASH_ALG_SHA3_512:
*spu2_type = SPU2_HASH_TYPE_SHA3_512;
+ break;
case HASH_ALG_LAST:
default:
err = -EINVAL;
#define SE_GROUP 0
#define DRIVER_VERSION "1.0"
+#define FW_DIR "cavium/"
/* SE microcode */
-#define SE_FW "cnn55xx_se.fw"
+#define SE_FW FW_DIR "cnn55xx_se.fw"
static const char nitrox_driver_name[] = "CNN55XX";
struct device *dev = &pdev->dev;
struct resource *res;
struct safexcel_crypto_priv *priv;
- u64 dma_mask;
int i, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
return -EPROBE_DEFER;
}
- if (of_property_read_u64(dev->of_node, "dma-mask", &dma_mask))
- dma_mask = DMA_BIT_MASK(64);
- ret = dma_set_mask_and_coherent(dev, dma_mask);
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret)
goto err_clk;
}
EXPORT_SYMBOL_GPL(dax_write_cache);
+bool dax_write_cache_enabled(struct dax_device *dax_dev)
+{
+ return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
+
bool dax_alive(struct dax_device *dax_dev)
{
lockdep_assert_held(&dax_srcu);
result = idr_find(&fpriv->bo_list_handles, id);
if (result) {
- if (kref_get_unless_zero(&result->refcount))
+ if (kref_get_unless_zero(&result->refcount)) {
+ rcu_read_unlock();
mutex_lock(&result->lock);
- else
+ } else {
+ rcu_read_unlock();
result = NULL;
+ }
+ } else {
+ rcu_read_unlock();
}
- rcu_read_unlock();
return result;
}
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
{
- u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ u32 data;
- if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (se_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
+
+ if (se_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (sh_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- } else {
+
+ if (sh_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- }
+
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}
pp_table->AvfsGbCksOff.m2_shift = 12;
pp_table->AvfsGbCksOff.b_shift = 0;
- for (i = 0; i < dep_table->count; i++) {
- if (dep_table->entries[i].sclk_offset == 0)
- pp_table->StaticVoltageOffsetVid[i] = 248;
- else
- pp_table->StaticVoltageOffsetVid[i] =
- (uint8_t)(dep_table->entries[i].sclk_offset *
- VOLTAGE_VID_OFFSET_SCALE2 /
- VOLTAGE_VID_OFFSET_SCALE1);
- }
+ for (i = 0; i < dep_table->count; i++)
+ pp_table->StaticVoltageOffsetVid[i] =
+ convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->disp_clk_quad_eqn_a) &&
DP_DETAILED_CAP_INFO_AVAILABLE;
int clk;
int bpc;
- char id[6];
+ char id[7];
int len;
uint8_t rev[2];
int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
seq_puts(m, "\t\tType: N/A\n");
}
+ memset(id, 0, sizeof(id));
drm_dp_downstream_id(aux, id);
seq_printf(m, "\t\tID: %s\n", id);
seq_printf(m, "\t\tHW: %d.%d\n",
(rev[0] & 0xf0) >> 4, rev[0] & 0xf);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2);
+ len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
if (len > 0)
seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
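(The branch device ID that drm_dp_downstream_id() reads back is six raw DPCD bytes with no terminator, so the buffer grows to seven bytes and is zeroed up front to keep the %s print NUL-terminated.)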
config DRM_EXYNOS_HDMI
bool "HDMI"
depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON
+ select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you want to use Exynos HDMI for DRM.
struct component_match *match;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
match = exynos_drm_match_add(&pdev->dev);
if (IS_ERR(match))
return ret;
dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0);
- if (!dsi->bridge_node)
- return -EINVAL;
return 0;
}
return ret;
}
- bridge = of_drm_find_bridge(dsi->bridge_node);
- if (bridge)
- drm_bridge_attach(encoder, bridge, NULL);
+ if (dsi->bridge_node) {
+ bridge = of_drm_find_bridge(dsi->bridge_node);
+ if (bridge)
+ drm_bridge_attach(encoder, bridge, NULL);
+ }
return mipi_dsi_host_register(&dsi->dsi_host);
}
void *data)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
- int ret;
- mic->bridge.funcs = &mic_bridge_funcs;
- mic->bridge.of_node = dev->of_node;
mic->bridge.driver_private = mic;
- ret = drm_bridge_add(&mic->bridge);
- if (ret)
- DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
- return ret;
+ return 0;
}
static void exynos_mic_unbind(struct device *dev, struct device *master,
already_disabled:
mutex_unlock(&mic_mutex);
-
- drm_bridge_remove(&mic->bridge);
}
static const struct component_ops exynos_mic_component_ops = {
platform_set_drvdata(pdev, mic);
+ mic->bridge.funcs = &mic_bridge_funcs;
+ mic->bridge.of_node = dev->of_node;
+
+ ret = drm_bridge_add(&mic->bridge);
+ if (ret) {
+ DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
+ return ret;
+ }
+
pm_runtime_enable(dev);
ret = component_add(dev, &exynos_mic_component_ops);
static int exynos_mic_remove(struct platform_device *pdev)
{
+ struct exynos_mic *mic = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &exynos_mic_component_ops);
pm_runtime_disable(&pdev->dev);
+
+ drm_bridge_remove(&mic->bridge);
+
return 0;
}
*/
cancel_delayed_work(&hdata->hotplug_work);
cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
-
- hdmiphy_disable(hdata);
}
static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
return hdmi_bridge_init(hdata);
}
-static struct of_device_id hdmi_match_types[] = {
+static const struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos4210-hdmi",
.data = &exynos4210_hdmi_driver_data,
return 0;
}
-#ifdef CONFIG_PM
-static int exynos_hdmi_suspend(struct device *dev)
+static int __maybe_unused exynos_hdmi_suspend(struct device *dev)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
return 0;
}
-static int exynos_hdmi_resume(struct device *dev)
+static int __maybe_unused exynos_hdmi_resume(struct device *dev)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
int ret;
return 0;
}
-#endif
static const struct dev_pm_ops exynos_hdmi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
.atomic_check = mixer_atomic_check,
};
-static struct mixer_drv_data exynos5420_mxr_drv_data = {
+static const struct mixer_drv_data exynos5420_mxr_drv_data = {
.version = MXR_VER_128_0_0_184,
.is_vp_enabled = 0,
};
-static struct mixer_drv_data exynos5250_mxr_drv_data = {
+static const struct mixer_drv_data exynos5250_mxr_drv_data = {
.version = MXR_VER_16_0_33_0,
.is_vp_enabled = 0,
};
-static struct mixer_drv_data exynos4212_mxr_drv_data = {
+static const struct mixer_drv_data exynos4212_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
};
-static struct mixer_drv_data exynos4210_mxr_drv_data = {
+static const struct mixer_drv_data exynos4210_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
.has_sclk = 1,
};
-static struct of_device_id mixer_match_types[] = {
+static const struct of_device_id mixer_match_types[] = {
{
.compatible = "samsung,exynos4210-mixer",
.data = &exynos4210_mxr_drv_data,
{
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
- bool have_enabled_pipe = false;
int pipe, id;
if (WARN_ON(!mutex_is_locked(&gvt->lock)))
return;
- hrtimer_cancel(&irq->vblank_timer.timer);
-
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
- have_enabled_pipe =
- pipe_is_enabled(vgpu, pipe);
- if (have_enabled_pipe)
- break;
+ if (pipe_is_enabled(vgpu, pipe))
+ goto out;
}
}
- if (have_enabled_pipe)
- hrtimer_start(&irq->vblank_timer.timer,
- ktime_add_ns(ktime_get(), irq->vblank_timer.period),
- HRTIMER_MODE_ABS);
+ /* all the pipes are disabled */
+ hrtimer_cancel(&irq->vblank_timer.timer);
+ return;
+
+out:
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
+
}
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
return NOTIFY_DONE;
}
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags)
{
struct clflush *clflush;
*/
if (!i915_gem_object_has_struct_page(obj)) {
obj->cache_dirty = false;
- return;
+ return false;
}
/* If the GPU is snooping the contents of the CPU cache,
* tracking.
*/
if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
- return;
+ return false;
trace_i915_gem_object_clflush(obj);
}
obj->cache_dirty = false;
+ return true;
}
struct drm_i915_private;
struct drm_i915_gem_object;
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags);
#define I915_CLFLUSH_FORCE BIT(0)
#define I915_CLFLUSH_SYNC BIT(1)
eb->args->flags |= __EXEC_HAS_RELOC;
}
- entry->flags |= __EXEC_OBJECT_HAS_PIN;
- GEM_BUG_ON(eb_vma_misplaced(entry, vma));
-
if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_get_fence(vma);
if (unlikely(err)) {
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
}
+ entry->flags |= __EXEC_OBJECT_HAS_PIN;
+ GEM_BUG_ON(eb_vma_misplaced(entry, vma));
+
return 0;
}
* to read. However, if the array is not writable the user loses
* the updated relocation values.
*/
- if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs))))
+ if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
return -EFAULT;
do {
}
}
- return err ?: have_copy;
+ return err;
}
static int eb_relocate(struct i915_execbuffer *eb)
int err;
for (i = 0; i < count; i++) {
- const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+ struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
struct i915_vma *vma = exec_to_vma(entry);
struct drm_i915_gem_object *obj = vma->obj;
eb->request->capture_list = capture;
}
+ if (unlikely(obj->cache_dirty && !obj->cache_coherent)) {
+ if (i915_gem_clflush_object(obj, 0))
+ entry->flags &= ~EXEC_OBJECT_ASYNC;
+ }
+
if (entry->flags & EXEC_OBJECT_ASYNC)
goto skip_flushes;
- if (unlikely(obj->cache_dirty && !obj->cache_coherent))
- i915_gem_clflush_object(obj, 0);
-
err = i915_gem_request_await_object
(eb->request, obj, entry->flags & EXEC_OBJECT_WRITE);
if (err)
goto err_unlock;
err = eb_relocate(&eb);
- if (err)
+ if (err) {
/*
* If the user expects the execobject.offset and
* reloc.presumed_offset to be an exact match,
* relocation.
*/
args->flags &= ~__EXEC_HAS_RELOC;
- if (err < 0)
goto err_vma;
+ }
if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) {
DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
static inline void __i915_vma_unpin(struct i915_vma *vma)
{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
vma->flags--;
}
static inline void i915_vma_unpin(struct i915_vma *vma)
{
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
__i915_vma_unpin(vma);
}
val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
val &= ~LOADGEN_SELECT;
- if (((rate < 600000) && (width == 4) && (ln >= 1)) ||
- ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) {
+ if ((rate <= 600000 && width == 4 && ln >= 1) ||
+ (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
intel_finish_page_flip_cs(dev_priv, crtc->pipe);
}
-static void intel_update_primary_planes(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
-
- for_each_crtc(dev, crtc) {
- struct intel_plane *plane = to_intel_plane(crtc->primary);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (plane_state->base.visible) {
- trace_intel_update_plane(&plane->base,
- to_intel_crtc(crtc));
-
- plane->update_plane(plane,
- to_intel_crtc_state(crtc->state),
- plane_state);
- }
- }
-}
-
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
struct drm_atomic_state *state;
int ret;
+
+ /* reset doesn't touch the display */
+ if (!i915.force_reset_modeset_test &&
+ !gpu_reset_clobbers_display(dev_priv))
+ return;
+
/*
* Need mode_config.mutex so that we don't
* trample ongoing ->detect() and whatnot.
drm_modeset_backoff(ctx);
}
-
- /* reset doesn't touch the display, but flips might get nuked anyway, */
- if (!i915.force_reset_modeset_test &&
- !gpu_reset_clobbers_display(dev_priv))
- return;
-
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
struct drm_atomic_state *state = dev_priv->modeset_restore_state;
int ret;
+ /* reset doesn't touch the display */
+ if (!i915.force_reset_modeset_test &&
+ !gpu_reset_clobbers_display(dev_priv))
+ return;
+
+ if (!state)
+ goto unlock;
+
/*
* Flips in the rings will be nuked by the reset,
* so complete all pending flips so that user space
/* reset doesn't touch the display */
if (!gpu_reset_clobbers_display(dev_priv)) {
- if (!state) {
- /*
- * Flips in the rings have been nuked by the reset,
- * so update the base address of all primary
- * planes to the the last fb to make sure we're
- * showing the correct fb after a reset.
- *
- * FIXME: Atomic will make this obsolete since we won't schedule
- * CS-based flips (which might get lost in gpu resets) any more.
- */
- intel_update_primary_planes(dev);
- } else {
- ret = __intel_display_resume(dev, state, ctx);
+ /* for testing only restore the display */
+ ret = __intel_display_resume(dev, state, ctx);
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
- }
} else {
/*
* The display has been reset as well,
intel_hpd_init(dev_priv);
}
- if (state)
- drm_atomic_state_put(state);
+ drm_atomic_state_put(state);
+unlock:
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
u64 power_domain_mask;
bool active;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ intel_crtc_init_scalers(crtc, pipe_config);
+
+ pipe_config->scaler_state.scaler_id = -1;
+ pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
+ }
+
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (INTEL_GEN(dev_priv) >= 9) {
- intel_crtc_init_scalers(crtc, pipe_config);
-
- pipe_config->scaler_state.scaler_id = -1;
- pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
- }
-
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT_ULL(power_domain);
* On some platforms writing CURCNTR first will also
* cause CURPOS to be armed by the CURBASE write.
* Without the CURCNTR write the CURPOS write would
- * arm itself.
+ * arm itself. Thus we always start the full update
+ * with a CURCNTR write.
+ *
+ * On other platforms CURPOS always requires the
+ * CURBASE write to arm the update. Additionally
+ * a write to any of the cursor registers will cancel
+ * an already armed cursor update. Thus leaving out
+ * the CURBASE write after CURPOS could lead to a
+ * cursor that doesn't appear to move, or even change
+ * shape. Thus we always write CURBASE.
*
* CURCNTR and CUR_FBC_CTL are always
* armed by the CURBASE write only.
plane->cursor.cntl = cntl;
} else {
I915_WRITE_FW(CURPOS(pipe), pos);
+ I915_WRITE_FW(CURBASE(pipe), base);
}
POSTING_READ_FW(CURBASE(pipe));
return true;
if (IS_SKYLAKE(dev_priv))
return true;
- if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D)
+ if (IS_KABYLAKE(dev_priv))
return true;
return false;
}
if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
(plane_bytes_per_line / 512 < 1))
selected_result = method2;
- else if ((ddb_allocation && ddb_allocation /
- fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
+ else if (ddb_allocation >=
+ fixed_16_16_to_u32_round_up(plane_blocks_per_line))
selected_result = min_fixed_16_16(method1, method2);
else if (latency >= linetime_us)
selected_result = min_fixed_16_16(method1, method2);
mkwrite_device_info(i915)->ring_mask = BIT(0);
i915->engine[RCS] = mock_engine(i915, "mock");
if (!i915->engine[RCS])
- goto err_dependencies;
+ goto err_priorities;
i915->kernel_context = mock_context(i915, NULL);
if (!i915->kernel_context)
return -ENODEV;
if (WARN_ON(msg->size > 16))
return -E2BIG;
- if (msg->size == 0)
- return msg->size;
ret = nvkm_i2c_aux_acquire(aux);
if (ret)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
- struct drm_crtc *crtc;
if (!suspend) {
if (drm_drv_uses_atomic_modeset(dev))
drm_crtc_force_disable_all(dev);
}
- /* Make sure that drm and hw vblank irqs get properly disabled. */
- drm_for_each_crtc(crtc, dev)
- drm_crtc_vblank_off(crtc);
-
/* disable flip completion events */
nvif_notify_put(&drm->flip);
drm_mode_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
struct nvkm_i2c_aux *aux =
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
if (aux) {
- nv_encoder->i2c = &nv_connector->aux.ddc;
+ if (disp->disp->oclass < GF110_DISP) {
+ /* HW has no support for address-only
+ * transactions, so we're required to
+ * use custom I2C-over-AUX code.
+ */
+ nv_encoder->i2c = &aux->i2c;
+ } else {
+ nv_encoder->i2c = &nv_connector->aux.ddc;
+ }
nv_encoder->aux = aux;
}
/*TODO: Use DP Info Table to check for support. */
- if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) {
+ if (disp->disp->oclass >= GF110_DISP) {
ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
nv_connector->base.base.id,
&nv_encoder->dp.mstm);
NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
asyh->clr.mask, asyh->set.mask);
+ if (crtc_state->active && !asyh->state.active)
+ drm_crtc_vblank_off(crtc);
if (asyh->clr.mask) {
nv50_head_flush_clr(head, asyh, atom->flush_disable);
nv50_head_flush_set(head, asyh);
interlock_core = 1;
}
- }
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (crtc->state->event)
- drm_crtc_vblank_get(crtc);
+ if (asyh->state.active) {
+ if (!crtc_state->active)
+ drm_crtc_vblank_on(crtc);
+ if (asyh->state.event)
+ drm_crtc_vblank_get(crtc);
+ }
}
/* Update plane(s). */
if (crtc->state->event) {
unsigned long flags;
/* Get correct count/ts if racing with vblank irq */
- drm_accurate_vblank_count(crtc);
+ if (crtc->state->active)
+ drm_accurate_vblank_count(crtc);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
crtc->state->event = NULL;
- drm_crtc_vblank_put(crtc);
+ if (crtc->state->active)
+ drm_crtc_vblank_put(crtc);
}
}
unsigned proto_evo:4;
enum nvkm_ior_proto {
CRT,
+ TV,
TMDS,
LVDS,
DP,
u8 type[3];
} pior;
- struct nv50_disp_chan *chan[17];
+ struct nv50_disp_chan *chan[21];
};
void nv50_disp_super_1(struct nv50_disp *);
case 0:
switch (outp->info.type) {
case DCB_OUTPUT_ANALOG: *type = DAC; return CRT;
+ case DCB_OUTPUT_TV : *type = DAC; return TV;
case DCB_OUTPUT_TMDS : *type = SOR; return TMDS;
case DCB_OUTPUT_LVDS : *type = SOR; return LVDS;
case DCB_OUTPUT_DP : *type = SOR; return DP;
if (bar->bar[0].mem) {
addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
- nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
+ nvkm_wr32(device, 0x001714, 0x80000000 | addr);
}
return 0;
nvkm-y += nvkm/subdev/i2c/aux.o
nvkm-y += nvkm/subdev/i2c/auxg94.o
+nvkm-y += nvkm/subdev/i2c/auxgf119.o
nvkm-y += nvkm/subdev/i2c/auxgm200.o
nvkm-y += nvkm/subdev/i2c/anx9805.o
nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
u32 addr, u8 *data, u8 *size)
{
+ if (!*size && !aux->func->address_only) {
+ AUX_ERR(aux, "address-only transaction dropped");
+ return -ENOSYS;
+ }
return aux->func->xfer(aux, retry, type, addr, data, size);
}
#include "pad.h"
struct nvkm_i2c_aux_func {
+ bool address_only;
int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 *size);
int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 *size);
+int g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
+ int, u8, struct nvkm_i2c_aux **);
+
int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+int g94_i2c_aux_xfer(struct nvkm_i2c_aux *, bool, u8, u32, u8 *, u8 *);
+int gf119_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
#define AUX_MSG(b,l,f,a...) do { \
return 0;
}
-static int
+int
g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
}
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
- ctrl &= ~0x0001f0ff;
+ ctrl &= ~0x0001f1ff;
ctrl |= type << 12;
- ctrl |= *size - 1;
+ ctrl |= (*size ? (*size - 1) : 0x00000100);
nvkm_wr32(device, 0x00e4e0 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
}
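
[Editor's note] The two changed lines encode the request as follows (layout inferred from this change alone, not from hardware documentation): bits 7:0 of the control word carry the transfer size minus one, and bit 8, which the widened mask now also clears, requests an address-only transaction when *size is zero. A worked example, assuming the kernel's u8/u32 types:

/* Illustrative only: control-word encoding as implied by the hunk above. */
static u32 example_aux_ctrl(u32 ctrl, u8 type, u8 size)
{
	ctrl &= ~0x0001f1ff;	/* clear type (16:12), bit 8, size (7:0) */
	ctrl |= type << 12;	/* transaction type */
	ctrl |= size ? (size - 1) : 0x00000100;	/* size-1, or address-only */
	return ctrl;
}
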
-static const struct nvkm_i2c_aux_func
-g94_i2c_aux_func = {
- .xfer = g94_i2c_aux_xfer,
-};
-
int
-g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
- struct nvkm_i2c_aux **paux)
+g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
+ struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
{
struct g94_i2c_aux *aux;
return -ENOMEM;
*paux = &aux->base;
- nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base);
+ nvkm_i2c_aux_ctor(func, pad, index, &aux->base);
aux->ch = drive;
aux->base.intr = 1 << aux->ch;
return 0;
}
+
+static const struct nvkm_i2c_aux_func
+g94_i2c_aux = {
+ .xfer = g94_i2c_aux_xfer,
+};
+
+int
+g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
+{
+ return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux);
+}
--- /dev/null
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "aux.h"
+
+static const struct nvkm_i2c_aux_func
+gf119_i2c_aux = {
+ .address_only = true,
+ .xfer = g94_i2c_aux_xfer,
+};
+
+int
+gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
+{
+ return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux);
+}
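
[Editor's note] Design choice worth flagging: the new GF119 implementation reuses g94_i2c_aux_xfer() unchanged and differs from G94 only in setting .address_only, which the nvkm_i2c_aux_xfer() guard added earlier keys off; the gm200 function table below gains the same flag.
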
}
ctrl = nvkm_rd32(device, 0x00d954 + base);
- ctrl &= ~0x0001f0ff;
+ ctrl &= ~0x0001f1ff;
ctrl |= type << 12;
- ctrl |= *size - 1;
+ ctrl |= (*size ? (*size - 1) : 0x00000100);
nvkm_wr32(device, 0x00d950 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
static const struct nvkm_i2c_aux_func
gm200_i2c_aux_func = {
+ .address_only = true,
.xfer = gm200_i2c_aux_xfer,
};
static const struct nvkm_i2c_pad_func
gf119_i2c_pad_s_func = {
.bus_new_4 = gf119_i2c_bus_new,
- .aux_new_6 = g94_i2c_aux_new,
+ .aux_new_6 = gf119_i2c_aux_new,
.mode = g94_i2c_pad_mode,
};
static const struct nvkm_i2c_pad_func
gf119_i2c_pad_x_func = {
.bus_new_4 = gf119_i2c_bus_new,
- .aux_new_6 = g94_i2c_aux_new,
+ .aux_new_6 = gf119_i2c_aux_new,
};
int
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
+ select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
+ select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
+ select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+ select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
help
Choose this option if you have a Rockchip SoC chipset.
This driver provides kernel mode setting and buffer
management to userspace. This driver does not provide
2D or 3D acceleration; acceleration is performed by other
IP found on the SoC.
+if DRM_ROCKCHIP
+
config ROCKCHIP_ANALOGIX_DP
bool "Rockchip specific extensions for Analogix DP driver"
- depends on DRM_ROCKCHIP
- select DRM_ANALOGIX_DP
help
This selects support for Rockchip SoC specific extensions
for the Analogix Core DP driver. If you want to enable DP
config ROCKCHIP_CDN_DP
bool "Rockchip cdn DP"
- depends on DRM_ROCKCHIP
- depends on EXTCON
- select SND_SOC_HDMI_CODEC if SND_SOC
+ depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
help
This selects support for Rockchip SoC specific extensions
for the cdn DP driver. If you want to enable DP on
config ROCKCHIP_DW_HDMI
bool "Rockchip specific extensions for Synopsys DW HDMI"
- depends on DRM_ROCKCHIP
- select DRM_DW_HDMI
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare HDMI driver. If you want to
config ROCKCHIP_DW_MIPI_DSI
bool "Rockchip specific extensions for Synopsys DW MIPI DSI"
- depends on DRM_ROCKCHIP
- select DRM_MIPI_DSI
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare MIPI DSI driver. If you want to
config ROCKCHIP_INNO_HDMI
bool "Rockchip specific extensions for Innosilicon HDMI"
- depends on DRM_ROCKCHIP
help
This selects support for Rockchip SoC specific extensions
for the Innosilicon HDMI driver. If you want to enable
HDMI on RK3036 based SoC, you should select this option.
+
+endif
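
[Editor's note] Design note on the Kconfig restructuring above: the per-option "depends on DRM_ROCKCHIP" lines are hoisted into a single if DRM_ROCKCHIP/endif block, and the selects move to the top-level symbol as conditional "select FOO if ROCKCHIP_BAR" lines; the explicit EXTCON expression prevents a builtin cdn DP from linking against a modular EXTCON.
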
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
-static struct ttm_place vram_placement_flags = {
+static const struct ttm_place vram_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};
-static struct ttm_place vram_ne_placement_flags = {
+static const struct ttm_place vram_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place sys_placement_flags = {
+static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};
-static struct ttm_place sys_ne_placement_flags = {
+static const struct ttm_place sys_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place gmr_placement_flags = {
+static const struct ttm_place gmr_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};
-static struct ttm_place gmr_ne_placement_flags = {
+static const struct ttm_place gmr_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place mob_placement_flags = {
+static const struct ttm_place mob_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};
-static struct ttm_place mob_ne_placement_flags = {
+static const struct ttm_place mob_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
.busy_placement = &vram_placement_flags
};
-static struct ttm_place vram_gmr_placement_flags[] = {
+static const struct ttm_place vram_gmr_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
}
};
-static struct ttm_place gmr_vram_placement_flags[] = {
+static const struct ttm_place gmr_vram_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
.busy_placement = &gmr_placement_flags
};
-static struct ttm_place vram_gmr_ne_placement_flags[] = {
+static const struct ttm_place vram_gmr_ne_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
.busy_placement = &sys_ne_placement_flags
};
-static struct ttm_place evictable_placement_flags[] = {
+static const struct ttm_place evictable_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
if (ret)
return ret;
- header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
- &header->handle);
+ header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
+ &header->handle);
if (!header->cb_header) {
ret = -ENOMEM;
goto out_no_cb_header;
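
[Editor's note] dma_pool_zalloc() behaves like dma_pool_alloc() followed by zeroing the returned block, so the command-buffer header no longer starts out with stale pool contents. A sketch of the equivalence (not the kernel's actual implementation; the explicit size parameter is an assumption, since the real pool tracks its block size internally):

#include <linux/dmapool.h>
#include <linux/string.h>

static void *example_pool_zalloc(struct dma_pool *pool, gfp_t flags,
				 dma_addr_t *handle, size_t size)
{
	void *cpu_addr = dma_pool_alloc(pool, flags, handle);

	if (cpu_addr)
		memset(cpu_addr, 0, size);	/* what the z-variant adds */
	return cpu_addr;
}
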