WARNING: Forcing ASPM on may cause system lockups.
pcie_pme= [PCIE,PM] Native PCIe PME signaling options:
- off Do not use native PCIe PME signaling.
+ Format: {auto|force}[,nomsi]
+ auto Use native PCIe PME signaling if the BIOS allows the
+ kernel to control PCIe config registers of root ports.
force Use native PCIe PME signaling even if the BIOS refuses
to allow the kernel to control the relevant PCIe config
registers.
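		For example, booting with "pcie_pme=force,nomsi" requests
		native PME signaling even though the BIOS has not handed
		over the root ports' config registers, and the nomsi suffix
		avoids MSI-based signaling (an illustrative combination of
		the options listed above).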
COMMON INTERNET FILE SYSTEM (CIFS)
M: Steve French <sfrench@samba.org>
-L: linux-cifs-client@lists.samba.org (moderated for non-subscribers)
+L: linux-cifs@vger.kernel.org
L: samba-technical@lists.samba.org (moderated for non-subscribers)
W: http://linux-cifs.samba.org/
Q: http://patchwork.ozlabs.org/project/linux-cifs-client/list/
S: Maintained
F: drivers/input/
+INPUT MULTITOUCH (MT) PROTOCOL
+M: Henrik Rydberg <rydberg@euromail.se>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: Documentation/input/multi-touch-protocol.txt
+K: \b(ABS|SYN)_MT_
+
INTEL IDLE DRIVER
M: Len Brown <lenb@kernel.org>
L: linux-pm@lists.linux-foundation.org
M: Grant Likely <grant.likely@secretlab.ca>
L: devicetree-discuss@lists.ozlabs.org
W: http://fdt.secretlab.ca
+T: git git://git.secretlab.ca/git/linux-2.6.git
S: Maintained
F: drivers/of
F: include/linux/of*.h
M: Grant Likely <grant.likely@secretlab.ca>
L: spi-devel-general@lists.sourceforge.net
Q: http://patchwork.kernel.org/project/spi-devel-general/list/
+T: git git://git.secretlab.ca/git/linux-2.6.git
S: Maintained
F: Documentation/spi/
F: drivers/spi/
localver-extra = $(scm-identifier)
else
ifneq ($(scm-identifier),)
- ifeq ($(LOCALVERSION),)
+ ifeq ("$(origin LOCALVERSION)", "undefined")
localver-extra = +
endif
endif
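The $(origin ...) test distinguishes "never set" from "set to empty": the '+'
marker is now appended only when LOCALVERSION was not defined at all, so an
explicit "make LOCALVERSION=" (or an empty LOCALVERSION in the environment)
suppresses it, which the old emptiness check could not express.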
return __kernel_ctpop(w);
}
-static inline unsigned int __arch_weight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
{
return __arch_hweight64(w);
}
obj-y += irq_pyxis.o irq_i8259.o irq_srm.o
obj-y += err_ev6.o
-obj-y += es1888.o smc37c669.o smc37c93x.o ns87312.o gct.o
+obj-y += es1888.o smc37c669.o smc37c93x.o pc873xx.o gct.o
obj-y += srmcons.o
else
# Board support
obj-$(CONFIG_ALPHA_ALCOR) += sys_alcor.o irq_i8259.o irq_srm.o
obj-$(CONFIG_ALPHA_CABRIOLET) += sys_cabriolet.o irq_i8259.o irq_srm.o \
- ns87312.o
+ pc873xx.o
obj-$(CONFIG_ALPHA_EB164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
- ns87312.o
+ pc873xx.o
obj-$(CONFIG_ALPHA_EB66P) += sys_cabriolet.o irq_i8259.o irq_srm.o \
- ns87312.o
+ pc873xx.o
obj-$(CONFIG_ALPHA_LX164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
smc37c93x.o
obj-$(CONFIG_ALPHA_PC164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
obj-$(CONFIG_ALPHA_RX164) += sys_rx164.o irq_i8259.o
obj-$(CONFIG_ALPHA_SABLE) += sys_sable.o
obj-$(CONFIG_ALPHA_LYNX) += sys_sable.o
-obj-$(CONFIG_ALPHA_BOOK1) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
-obj-$(CONFIG_ALPHA_AVANTI) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
-obj-$(CONFIG_ALPHA_NONAME) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
-obj-$(CONFIG_ALPHA_P2K) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
-obj-$(CONFIG_ALPHA_XL) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
+obj-$(CONFIG_ALPHA_BOOK1) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
+obj-$(CONFIG_ALPHA_AVANTI) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
+obj-$(CONFIG_ALPHA_NONAME) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
+obj-$(CONFIG_ALPHA_P2K) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
+obj-$(CONFIG_ALPHA_XL) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
obj-$(CONFIG_ALPHA_SX164) += sys_sx164.o irq_pyxis.o irq_i8259.o \
irq_srm.o smc37c669.o
-obj-$(CONFIG_ALPHA_TAKARA) += sys_takara.o irq_i8259.o ns87312.o
+obj-$(CONFIG_ALPHA_TAKARA) += sys_takara.o irq_i8259.o pc873xx.o
obj-$(CONFIG_ALPHA_WILDFIRE) += sys_wildfire.o irq_i8259.o
# Error support
+++ /dev/null
-/*
- * linux/arch/alpha/kernel/ns87312.c
- */
-
-#include <linux/init.h>
-#include <asm/io.h>
-#include "proto.h"
-
-
-/*
- * The SRM console *disables* the IDE interface, this code ensures it's
- * enabled.
- *
- * This code bangs on a control register of the 87312 Super I/O chip
- * that implements parallel port/serial ports/IDE/FDI. Depending on
- * the motherboard, the Super I/O chip can be configured through a
- * pair of registers that are located either at I/O ports 0x26e/0x26f
- * or 0x398/0x399. Unfortunately, autodetecting which base address is
- * in use works only once (right after a reset). The Super I/O chip
- * has the additional quirk that configuration register data must be
- * written twice (I believe this is a safety feature to prevent
- * accidental modification---fun, isn't it?).
- */
-
-void __init
-ns87312_enable_ide(long ide_base)
-{
- int data;
- unsigned long flags;
-
- local_irq_save(flags);
- outb(0, ide_base); /* set the index register for reg #0 */
- data = inb(ide_base+1); /* read the current contents */
- outb(0, ide_base); /* set the index register for reg #0 */
- outb(data | 0x40, ide_base+1); /* turn on IDE */
- outb(data | 0x40, ide_base+1); /* turn on IDE, really! */
- local_irq_restore(flags);
-}
--- /dev/null
+#include <linux/ioport.h>
+#include <asm/io.h>
+
+#include "pc873xx.h"
+
+static unsigned pc873xx_probelist[] = {0x398, 0x26e, 0};
+
+static char *pc873xx_names[] = {
+ "PC87303", "PC87306", "PC87312", "PC87332", "PC87334"
+};
+
+static unsigned int base, model;
+
+
+unsigned int __init pc873xx_get_base(void)
+{
+ return base;
+}
+
+char *__init pc873xx_get_model(void)
+{
+ return pc873xx_names[model];
+}
+
+static unsigned char __init pc873xx_read(unsigned int base, int reg)
+{
+ outb(reg, base);
+ return inb(base + 1);
+}
+
+static void __init pc873xx_write(unsigned int base, int reg, unsigned char data)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ outb(reg, base);
+ outb(data, base + 1);
+ outb(data, base + 1); /* Must be written twice */
+ local_irq_restore(flags);
+}
+
+int __init pc873xx_probe(void)
+{
+ int val, index = 0;
+
+ while ((base = pc873xx_probelist[index++])) {
+
+ if (request_region(base, 2, "Super IO PC873xx") == NULL)
+ continue;
+
+ val = pc873xx_read(base, REG_SID);
+ if ((val & 0xf0) == 0x10) {
+ model = PC87332;
+ break;
+ } else if ((val & 0xf8) == 0x70) {
+ model = PC87306;
+ break;
+ } else if ((val & 0xf8) == 0x50) {
+ model = PC87334;
+ break;
+ } else if ((val & 0xf8) == 0x40) {
+ model = PC87303;
+ break;
+ }
+
+ release_region(base, 2);
+ }
+
+ return (base == 0) ? -1 : 1;
+}
+
+void __init pc873xx_enable_epp19(void)
+{
+ unsigned char data;
+
+ printk(KERN_INFO "PC873xx enabling EPP v1.9\n");
+ data = pc873xx_read(base, REG_PCR);
+ pc873xx_write(base, REG_PCR, (data & 0xFC) | 0x02);
+}
+
+void __init pc873xx_enable_ide(void)
+{
+ unsigned char data;
+
+ printk(KERN_INFO "PC873xx enabling IDE interrupt\n");
+ data = pc873xx_read(base, REG_FER);
+ pc873xx_write(base, REG_FER, data | 0x40);
+}
--- /dev/null
+
+#ifndef _PC873xx_H_
+#define _PC873xx_H_
+
+/*
+ * Control Register Values
+ */
+#define REG_FER 0x00
+#define REG_FAR 0x01
+#define REG_PTR 0x02
+#define REG_FCR 0x03
+#define REG_PCR 0x04
+#define REG_KRR 0x05
+#define REG_PMC 0x06
+#define REG_TUP 0x07
+#define REG_SID 0x08
+#define REG_ASC 0x09
+#define REG_IRC 0x0e
+
+/*
+ * Model numbers
+ */
+#define PC87303 0
+#define PC87306 1
+#define PC87312 2
+#define PC87332 3
+#define PC87334 4
+
+int pc873xx_probe(void);
+unsigned int pc873xx_get_base(void);
+char *pc873xx_get_model(void);
+void pc873xx_enable_epp19(void);
+void pc873xx_enable_ide(void);
+
+#endif
/**
* pci_mmap_resource - map a PCI resource into user memory space
- * @filp: open sysfs file
* @kobj: kobject for mapping
* @attr: struct bin_attribute for the file being mapped
* @vma: struct vm_area_struct passed into the mmap
*
* Use the bus mapping routines to map a PCI resource into userspace.
*/
-static int pci_mmap_resource(struct file *filp, struct kobject *kobj,
+static int pci_mmap_resource(struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma, int sparse)
{
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
+#include "pc873xx.h"
#if defined(ALPHA_RESTORE_SRM_SETUP)
/* Save LCA configuration data as the console had it set up. */
common_init_pci();
sio_pci_route();
sio_fixup_irq_levels(sio_collect_irq_levels());
- ns87312_enable_ide(0x26e);
+
+ if (pc873xx_probe() == -1) {
+ printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
+ } else {
+ printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
+ pc873xx_get_model(), pc873xx_get_base());
+
+		/* Enabling things in the Super IO chip doesn't actually
+		 * configure and enable them; the legacy drivers still
+		 * need to do the actual configuration and enabling.
+ * This only unblocks them.
+ */
+
+#if !defined(CONFIG_ALPHA_AVANTI)
+ /* Don't bother on the Avanti family.
+ * None of them had on-board IDE.
+ */
+ pc873xx_enable_ide();
+#endif
+ pc873xx_enable_epp19();
+ }
}
static inline void __init
depends on KGDB || XMON
default y
-config IRQSTACKS
- bool "Use separate kernel stacks when processing interrupts"
- help
- If you say Y here the kernel will use separate kernel stacks
- for handling hard and soft interrupts. This can help avoid
- overflowing the process kernel stacks.
-
config VIRQ_DEBUG
bool "Expose hardware/virtual IRQ mapping via debugfs"
depends on DEBUG_FS
$(obj)/wrapper.a: $(obj-wlib) FORCE
$(call if_changed,bootar)
-hostprogs-y := addnote addRamDisk hack-coff mktree
+hostprogs-y := addnote hack-coff mktree
targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a)
extra-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
+++ /dev/null
-#include <stdio.h>
-#include <stdlib.h>
-#include <netinet/in.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <string.h>
-#include <elf.h>
-
-#define ElfHeaderSize (64 * 1024)
-#define ElfPages (ElfHeaderSize / 4096)
-#define KERNELBASE (0xc000000000000000)
-#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
-
-struct addr_range {
- unsigned long long addr;
- unsigned long memsize;
- unsigned long offset;
-};
-
-static int check_elf64(void *p, int size, struct addr_range *r)
-{
- Elf64_Ehdr *elf64 = p;
- Elf64_Phdr *elf64ph;
-
- if (elf64->e_ident[EI_MAG0] != ELFMAG0 ||
- elf64->e_ident[EI_MAG1] != ELFMAG1 ||
- elf64->e_ident[EI_MAG2] != ELFMAG2 ||
- elf64->e_ident[EI_MAG3] != ELFMAG3 ||
- elf64->e_ident[EI_CLASS] != ELFCLASS64 ||
- elf64->e_ident[EI_DATA] != ELFDATA2MSB ||
- elf64->e_type != ET_EXEC || elf64->e_machine != EM_PPC64)
- return 0;
-
- if ((elf64->e_phoff + sizeof(Elf64_Phdr)) > size)
- return 0;
-
- elf64ph = (Elf64_Phdr *) ((unsigned long)elf64 +
- (unsigned long)elf64->e_phoff);
-
- r->memsize = (unsigned long)elf64ph->p_memsz;
- r->offset = (unsigned long)elf64ph->p_offset;
- r->addr = (unsigned long long)elf64ph->p_vaddr;
-
-#ifdef DEBUG
- printf("PPC64 ELF file, ph:\n");
- printf("p_type 0x%08x\n", elf64ph->p_type);
- printf("p_flags 0x%08x\n", elf64ph->p_flags);
- printf("p_offset 0x%016llx\n", elf64ph->p_offset);
- printf("p_vaddr 0x%016llx\n", elf64ph->p_vaddr);
- printf("p_paddr 0x%016llx\n", elf64ph->p_paddr);
- printf("p_filesz 0x%016llx\n", elf64ph->p_filesz);
- printf("p_memsz 0x%016llx\n", elf64ph->p_memsz);
- printf("p_align 0x%016llx\n", elf64ph->p_align);
- printf("... skipping 0x%08lx bytes of ELF header\n",
- (unsigned long)elf64ph->p_offset);
-#endif
-
- return 64;
-}
-static void get4k(FILE *file, char *buf )
-{
- unsigned j;
- unsigned num = fread(buf, 1, 4096, file);
- for ( j=num; j<4096; ++j )
- buf[j] = 0;
-}
-
-static void put4k(FILE *file, char *buf )
-{
- fwrite(buf, 1, 4096, file);
-}
-
-static void death(const char *msg, FILE *fdesc, const char *fname)
-{
- fprintf(stderr, msg);
- fclose(fdesc);
- unlink(fname);
- exit(1);
-}
-
-int main(int argc, char **argv)
-{
- char inbuf[4096];
- struct addr_range vmlinux;
- FILE *ramDisk;
- FILE *inputVmlinux;
- FILE *outputVmlinux;
-
- char *rd_name, *lx_name, *out_name;
-
- size_t i;
- unsigned long ramFileLen;
- unsigned long ramLen;
- unsigned long roundR;
- unsigned long offset_end;
-
- unsigned long kernelLen;
- unsigned long actualKernelLen;
- unsigned long round;
- unsigned long roundedKernelLen;
- unsigned long ramStartOffs;
- unsigned long ramPages;
- unsigned long roundedKernelPages;
- unsigned long hvReleaseData;
- u_int32_t eyeCatcher = 0xc8a5d9c4;
- unsigned long naca;
- unsigned long xRamDisk;
- unsigned long xRamDiskSize;
- long padPages;
-
-
- if (argc < 2) {
- fprintf(stderr, "Name of RAM disk file missing.\n");
- exit(1);
- }
- rd_name = argv[1];
-
- if (argc < 3) {
- fprintf(stderr, "Name of vmlinux file missing.\n");
- exit(1);
- }
- lx_name = argv[2];
-
- if (argc < 4) {
- fprintf(stderr, "Name of vmlinux output file missing.\n");
- exit(1);
- }
- out_name = argv[3];
-
-
- ramDisk = fopen(rd_name, "r");
- if ( ! ramDisk ) {
- fprintf(stderr, "RAM disk file \"%s\" failed to open.\n", rd_name);
- exit(1);
- }
-
- inputVmlinux = fopen(lx_name, "r");
- if ( ! inputVmlinux ) {
- fprintf(stderr, "vmlinux file \"%s\" failed to open.\n", lx_name);
- exit(1);
- }
-
- outputVmlinux = fopen(out_name, "w+");
- if ( ! outputVmlinux ) {
- fprintf(stderr, "output vmlinux file \"%s\" failed to open.\n", out_name);
- exit(1);
- }
-
- i = fread(inbuf, 1, sizeof(inbuf), inputVmlinux);
- if (i != sizeof(inbuf)) {
- fprintf(stderr, "can not read vmlinux file %s: %u\n", lx_name, i);
- exit(1);
- }
-
- i = check_elf64(inbuf, sizeof(inbuf), &vmlinux);
- if (i == 0) {
- fprintf(stderr, "You must have a linux kernel specified as argv[2]\n");
- exit(1);
- }
-
- /* Input Vmlinux file */
- fseek(inputVmlinux, 0, SEEK_END);
- kernelLen = ftell(inputVmlinux);
- fseek(inputVmlinux, 0, SEEK_SET);
- printf("kernel file size = %lu\n", kernelLen);
-
- actualKernelLen = kernelLen - ElfHeaderSize;
-
- printf("actual kernel length (minus ELF header) = %lu\n", actualKernelLen);
-
- round = actualKernelLen % 4096;
- roundedKernelLen = actualKernelLen;
- if ( round )
- roundedKernelLen += (4096 - round);
- printf("Vmlinux length rounded up to a 4k multiple = %ld/0x%lx \n", roundedKernelLen, roundedKernelLen);
- roundedKernelPages = roundedKernelLen / 4096;
- printf("Vmlinux pages to copy = %ld/0x%lx \n", roundedKernelPages, roundedKernelPages);
-
- offset_end = _ALIGN_UP(vmlinux.memsize, 4096);
- /* calc how many pages we need to insert between the vmlinux and the start of the ram disk */
- padPages = offset_end/4096 - roundedKernelPages;
-
- /* Check and see if the vmlinux is already larger than _end in System.map */
- if (padPages < 0) {
- /* vmlinux is larger than _end - adjust the offset to the start of the embedded ram disk */
- offset_end = roundedKernelLen;
- printf("vmlinux is larger than _end indicates it needs to be - offset_end = %lx \n", offset_end);
- padPages = 0;
- printf("will insert %lx pages between the vmlinux and the start of the ram disk \n", padPages);
- }
- else {
- /* _end is larger than vmlinux - use the offset to _end that we calculated from the system map */
- printf("vmlinux is smaller than _end indicates is needed - offset_end = %lx \n", offset_end);
- printf("will insert %lx pages between the vmlinux and the start of the ram disk \n", padPages);
- }
-
-
-
- /* Input Ram Disk file */
- // Set the offset that the ram disk will be started at.
- ramStartOffs = offset_end; /* determined from the input vmlinux file and the system map */
- printf("Ram Disk will start at offset = 0x%lx \n", ramStartOffs);
-
- fseek(ramDisk, 0, SEEK_END);
- ramFileLen = ftell(ramDisk);
- fseek(ramDisk, 0, SEEK_SET);
- printf("%s file size = %ld/0x%lx \n", rd_name, ramFileLen, ramFileLen);
-
- ramLen = ramFileLen;
-
- roundR = 4096 - (ramLen % 4096);
- if ( roundR ) {
- printf("Rounding RAM disk file up to a multiple of 4096, adding %ld/0x%lx \n", roundR, roundR);
- ramLen += roundR;
- }
-
- printf("Rounded RAM disk size is %ld/0x%lx \n", ramLen, ramLen);
- ramPages = ramLen / 4096;
- printf("RAM disk pages to copy = %ld/0x%lx\n", ramPages, ramPages);
-
-
-
- // Copy 64K ELF header
- for (i=0; i<(ElfPages); ++i) {
- get4k( inputVmlinux, inbuf );
- put4k( outputVmlinux, inbuf );
- }
-
- /* Copy the vmlinux (as full pages). */
- fseek(inputVmlinux, ElfHeaderSize, SEEK_SET);
- for ( i=0; i<roundedKernelPages; ++i ) {
- get4k( inputVmlinux, inbuf );
- put4k( outputVmlinux, inbuf );
- }
-
- /* Insert pad pages (if appropriate) that are needed between */
- /* | the end of the vmlinux and the ram disk. */
- for (i=0; i<padPages; ++i) {
- memset(inbuf, 0, 4096);
- put4k(outputVmlinux, inbuf);
- }
-
- /* Copy the ram disk (as full pages). */
- for ( i=0; i<ramPages; ++i ) {
- get4k( ramDisk, inbuf );
- put4k( outputVmlinux, inbuf );
- }
-
- /* Close the input files */
- fclose(ramDisk);
- fclose(inputVmlinux);
- /* And flush the written output file */
- fflush(outputVmlinux);
-
-
-
- /* Fixup the new vmlinux to contain the ram disk starting offset (xRamDisk) and the ram disk size (xRamDiskSize) */
- /* fseek to the hvReleaseData pointer */
- fseek(outputVmlinux, ElfHeaderSize + 0x24, SEEK_SET);
- if (fread(&hvReleaseData, 4, 1, outputVmlinux) != 1) {
- death("Could not read hvReleaseData pointer\n", outputVmlinux, out_name);
- }
- hvReleaseData = ntohl(hvReleaseData); /* Convert to native int */
- printf("hvReleaseData is at %08lx\n", hvReleaseData);
-
- /* fseek to the hvReleaseData */
- fseek(outputVmlinux, ElfHeaderSize + hvReleaseData, SEEK_SET);
- if (fread(inbuf, 0x40, 1, outputVmlinux) != 1) {
- death("Could not read hvReleaseData\n", outputVmlinux, out_name);
- }
- /* Check hvReleaseData sanity */
- if (memcmp(inbuf, &eyeCatcher, 4) != 0) {
- death("hvReleaseData is invalid\n", outputVmlinux, out_name);
- }
- /* Get the naca pointer */
- naca = ntohl(*((u_int32_t*) &inbuf[0x0C])) - KERNELBASE;
- printf("Naca is at offset 0x%lx \n", naca);
-
- /* fseek to the naca */
- fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
- if (fread(inbuf, 0x18, 1, outputVmlinux) != 1) {
- death("Could not read naca\n", outputVmlinux, out_name);
- }
- xRamDisk = ntohl(*((u_int32_t *) &inbuf[0x0c]));
- xRamDiskSize = ntohl(*((u_int32_t *) &inbuf[0x14]));
- /* Make sure a RAM disk isn't already present */
- if ((xRamDisk != 0) || (xRamDiskSize != 0)) {
- death("RAM disk is already attached to this kernel\n", outputVmlinux, out_name);
- }
- /* Fill in the values */
- *((u_int32_t *) &inbuf[0x0c]) = htonl(ramStartOffs);
- *((u_int32_t *) &inbuf[0x14]) = htonl(ramPages);
-
- /* Write out the new naca */
- fflush(outputVmlinux);
- fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
- if (fwrite(inbuf, 0x18, 1, outputVmlinux) != 1) {
- death("Could not write naca\n", outputVmlinux, out_name);
- }
- printf("Ram Disk of 0x%lx pages is attached to the kernel at offset 0x%08lx\n",
- ramPages, ramStartOffs);
-
- /* Done */
- fclose(outputVmlinux);
- /* Set permission to executable */
- chmod(out_name, S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
-
- return 0;
-}
-
compatible = "fsl,mpc5200-gpio";
reg = <0xb00 0x40>;
interrupts = <1 7 0>;
+ gpio-controller;
+ #gpio-cells = <2>;
};
gpio@c00 {
compatible = "fsl,mpc5200-gpio-wkup";
reg = <0xc00 0x40>;
interrupts = <1 8 0 0 3 0>;
+ gpio-controller;
+ #gpio-cells = <2>;
};
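With gpio-controller and #gpio-cells = <2> present, client nodes can reference
these banks through two-cell GPIO specifiers (pin index plus flags), e.g.
something like gpios = <&gpio_simple 12 0>, where the phandle label is purely
illustrative.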
spi@f00 {
reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts
interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co.
- phy0: ethernet-phy@1 {
- reg = <1>;
+ phy0: ethernet-phy@0 {
+ reg = <0>;
};
};
compatible = "fsl,mpc5200-i2c","fsl-i2c";
reg = <0x3d40 0x40>;
interrupts = <2 16 0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ };
};
+
sram@8000 {
compatible = "fsl,mpc5200-sram";
reg = <0x8000 0x4000>;
0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
};
+
+ localbus {
+ compatible = "fsl,mpc5200-lpb","simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ ranges = <0 0 0xff000000 0x01000000>;
+
+ flash@0,0 {
+ compatible = "amd,am29lv652d", "cfi-flash";
+ reg = <0 0 0x01000000>;
+ bank-width = <1>;
+ };
+ };
};
compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
reg = <0x3d40 0x40>;
interrupts = <2 16 0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ };
};
sram@8000 {
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
# CONFIG_BLK_DEV_INITRD is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_RELAY=y
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_RELAY=y
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_RELAY=y
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_RELAY=y
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
# CONFIG_BLK_DEV_INITRD is not set
CONFIG_LOG_BUF_SHIFT=15
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUP_CPUACCT is not set
# CONFIG_RESOURCE_COUNTERS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_PROC_PID_CPUSET=y
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_GROUP_SCHED is not set
# CONFIG_USER_SCHED is not set
# CONFIG_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
CONFIG_LOG_BUF_SHIFT=15
# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
# CONFIG_BLK_DEV_INITRD is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
# CONFIG_BLK_DEV_INITRD is not set
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
# CONFIG_FAIR_GROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
# CONFIG_RELAY is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_LOG_BUF_SHIFT=16
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUP_CPUACCT is not set
# CONFIG_RESOURCE_COUNTERS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_PROC_PID_CPUSET=y
CONFIG_RELAY=y
CONFIG_NAMESPACES=y
CONFIG_PROC_PID_CPUSET=y
# CONFIG_CGROUP_CPUACCT is not set
# CONFIG_RESOURCE_COUNTERS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_RELAY=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_GROUP_SCHED is not set
CONFIG_CGROUP_CPUACCT=y
# CONFIG_RESOURCE_COUNTERS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_PROC_PID_CPUSET=y
CONFIG_RELAY=y
CONFIG_NAMESPACES=y
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
# CONFIG_BLK_DEV_INITRD is not set
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_RELAY=y
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
#define exc_lvl_ctx_init()
#endif
-#ifdef CONFIG_IRQSTACKS
/*
* Per-cpu stacks for handling hard and soft interrupts.
*/
extern void call_do_softirq(struct thread_info *tp);
extern int call_handle_irq(int irq, void *p1,
struct thread_info *tp, void *func);
-#else
-#define irq_ctx_init()
-
-#endif /* CONFIG_IRQSTACKS */
-
extern void do_IRQ(struct pt_regs *regs);
#endif /* _ASM_IRQ_H */
#include <asm/page.h>
-/* Kdump kernel runs at 32 MB, change at your peril. */
+/*
+ * If CONFIG_RELOCATABLE is enabled we can place the kdump kernel anywhere.
+ * To keep enough space in the RMO for the first stage kernel on 64bit, we
+ * place it at 64MB. If CONFIG_RELOCATABLE is not enabled we must place
+ * the second stage at 32MB.
+ */
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC64)
+#define KDUMP_KERNELBASE 0x4000000
+#else
#define KDUMP_KERNELBASE 0x2000000
+#endif
/* How many bytes to reserve at zero for kdump. The reserve limit should
* be greater or equal to the trampoline's end address.
}
#endif
-#ifdef CONFIG_IRQSTACKS
static inline void handle_one_irq(unsigned int irq)
{
struct thread_info *curtp, *irqtp;
if (irqtp->flags)
set_bits(irqtp->flags, &curtp->flags);
}
-#else
-static inline void handle_one_irq(unsigned int irq)
-{
- generic_handle_irq(irq);
-}
-#endif
static inline void check_stack_overflow(void)
{
}
#endif
-#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
irqtp->task = NULL;
}
-#else
-#define do_softirq_onstack() __do_softirq()
-#endif /* CONFIG_IRQSTACKS */
-
void do_softirq(void)
{
unsigned long flags;
my_cpu = get_cpu();
	/* Make sure each CPU has at least made it to the state we need */
- for (i=0; i < NR_CPUS; i++) {
+ for_each_online_cpu(i) {
if (i == my_cpu)
continue;
while (paca[i].kexec_state < wait_state) {
barrier();
- if (!cpu_possible(i)) {
- printk("kexec: cpu %d hw_cpu_id %d is not"
- " possible, ignoring\n",
- i, paca[i].hw_cpu_id);
- break;
- }
- if (!cpu_online(i)) {
- /* Fixme: this can be spinning in
- * pSeries_secondary_wait with a paca
- * waiting for it to go online.
- */
- printk("kexec: cpu %d hw_cpu_id %d is not"
- " online, ignoring\n",
- i, paca[i].hw_cpu_id);
- break;
- }
if (i != notified) {
printk( "kexec: waiting for cpu %d (physical"
" %d) to enter %i state\n",
.text
-#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
mflr r0
stw r0,4(r1)
lwz r0,4(r1)
mtlr r0
blr
-#endif /* CONFIG_IRQSTACKS */
/*
* This returns the high 64 bits of the product of two 64-bit numbers.
.text
-#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
mflr r0
std r0,16(r1)
ld r0,16(r1)
mtlr r0
blr
-#endif /* CONFIG_IRQSTACKS */
.section ".toc","aw"
PPC64_CACHES:
return error;
}
-#ifdef CONFIG_IRQSTACKS
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
return 0;
}
-#else
-#define valid_irq_stack(sp, p, nb) 0
-#endif /* CONFIG_IRQSTACKS */
-
int validate_sp(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
struct flash_block_list *next;
struct flash_block blocks[FLASH_BLOCKS_PER_NODE];
};
-struct flash_block_list_header { /* just the header of flash_block_list */
- unsigned long num_blocks;
- struct flash_block_list *next;
-};
-static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
+static struct flash_block_list *rtas_firmware_flash_list;
/* Use slab cache to guarantee 4k alignment */
static struct kmem_cache *flash_block_cache = NULL;
/* Local copy of the flash block list.
* We only allow one open of the flash proc file and create this
- * list as we go. This list will be put in the
- * rtas_firmware_flash_list var once it is fully read.
+ * list as we go. The rtas_firmware_flash_list variable will be
+ * set once the data is fully read.
*
* For convenience as we build the list we use virtual addrs,
* we do not fill in the version number, and the length field
* is treated as the number of entries currently in the block
- * (i.e. not a byte count). This is all fixed on release.
+ * (i.e. not a byte count). This is all fixed when calling
+ * the flash routine.
*/
/* Status int must be first member of struct */
if (uf->flist) {
/* File was opened in write mode for a new flash attempt */
/* Clear saved list */
- if (rtas_firmware_flash_list.next) {
- free_flash_list(rtas_firmware_flash_list.next);
- rtas_firmware_flash_list.next = NULL;
+ if (rtas_firmware_flash_list) {
+ free_flash_list(rtas_firmware_flash_list);
+ rtas_firmware_flash_list = NULL;
}
if (uf->status != FLASH_AUTH)
uf->status = flash_list_valid(uf->flist);
if (uf->status == FLASH_IMG_READY)
- rtas_firmware_flash_list.next = uf->flist;
+ rtas_firmware_flash_list = uf->flist;
else
free_flash_list(uf->flist);
unsigned long rtas_block_list;
int i, status, update_token;
- if (rtas_firmware_flash_list.next == NULL)
+ if (rtas_firmware_flash_list == NULL)
return; /* nothing to do */
if (reboot_type != SYS_RESTART) {
return;
}
- /* NOTE: the "first" block list is a global var with no data
- * blocks in the kernel data segment. We do this because
- * we want to ensure this block_list addr is under 4GB.
+ /*
+ * NOTE: the "first" block must be under 4GB, so we create
+ * an entry with no data blocks in the reserved buffer in
+ * the kernel data segment.
*/
- rtas_firmware_flash_list.num_blocks = 0;
- flist = (struct flash_block_list *)&rtas_firmware_flash_list;
+ spin_lock(&rtas_data_buf_lock);
+ flist = (struct flash_block_list *)&rtas_data_buf[0];
+ flist->num_blocks = 0;
+ flist->next = rtas_firmware_flash_list;
rtas_block_list = virt_to_abs(flist);
if (rtas_block_list >= 4UL*1024*1024*1024) {
printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
+ spin_unlock(&rtas_data_buf_lock);
return;
}
printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
/* Update the block_list in place. */
+ rtas_firmware_flash_list = NULL; /* too hard to backout on error */
image_size = 0;
for (f = flist; f; f = next) {
/* Translate data addrs to absolute */
printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
break;
}
+ spin_unlock(&rtas_data_buf_lock);
}
static void remove_flash_pde(struct proc_dir_entry *dp)
arch_initcall(ppc_init);
-#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void)
{
unsigned int i;
__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
}
}
-#else
-#define irqstack_early_init()
-#endif
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
static void __init exc_lvl_early_init(void)
return 1UL << SID_SHIFT;
}
-#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void)
{
u64 limit = slb0_limit();
THREAD_SIZE, limit));
}
}
-#else
-#define irqstack_early_init()
-#endif
#ifdef CONFIG_PPC_BOOK3E
static void __init exc_lvl_early_init(void)
{
struct page *ptepage;
-#ifdef CONFIG_HIGHPTE
- gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT | __GFP_ZERO;
-#else
gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
-#endif
ptepage = alloc_pages(flags, 0);
if (!ptepage)
{ .type = "builtin", .compatible = "mpc5200", }, /* efika */
{}
};
+ struct resource res;
/* map the whole register space */
np = of_find_matching_node(NULL, immr_ids);
- mbar = of_iomap(np, 0);
+
+ if (of_address_to_resource(np, 0, &res)) {
+ pr_err("mpc52xx_pm_prepare(): could not get IMMR address\n");
+ of_node_put(np);
+ return -ENOSYS;
+ }
+
+ mbar = ioremap(res.start, 0xc000); /* we should map whole region including SRAM */
+
of_node_put(np);
if (!mbar) {
pr_err("mpc52xx_pm_prepare(): could not map registers\n");
mpic->save_data[i].dest);
#ifdef CONFIG_MPIC_U3_HT_IRQS
- {
+ if (mpic->fixups) {
struct mpic_irq_fixup *fixup = &mpic->fixups[i];
if (fixup->base) {
#include <linux/spi/spi.h>
#include <linux/spi/sh_msiof.h>
#include <linux/spi/mmc_spi.h>
-#include <linux/mmc/host.h>
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
#include <video/sh_mobile_lcdc.h>
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
if (!dev->resource[i].start)
continue;
- if (dev->resource[i].flags & IORESOURCE_PCI_FIXED)
- continue;
if (dev->resource[i].flags & IORESOURCE_IO)
offset = hose->io_offset;
else if (dev->resource[i].flags & IORESOURCE_MEM)
spin_unlock_irqrestore(&pmbe->lock, flags);
}
- read_lock(&pmb_rwlock);
+ read_unlock(&pmb_rwlock);
}
#endif
struct e820entry *ei = &e820.map[i];
if (ei->type == E820_NVS)
- hibernate_nvs_register(ei->addr, ei->size);
+ suspend_nvs_register(ei->addr, ei->size);
}
return 0;
static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
+static unsigned char lapic_detected_unstable;
+static unsigned char lapic_marked_unstable;
static void power_saving_mwait_init(void)
{
power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
(highest_subcstate - 1);
- for_each_online_cpu(i)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i);
-
#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
*/
- if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
- return;
-
- /*FALL THROUGH*/
+ if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ tsc_detected_unstable = 1;
+ if (!boot_cpu_has(X86_FEATURE_ARAT))
+ lapic_detected_unstable = 1;
+ break;
default:
- /* TSC could halt in idle */
+ /* TSC & LAPIC could halt in idle */
tsc_detected_unstable = 1;
+ lapic_detected_unstable = 1;
}
#endif
}
mark_tsc_unstable("TSC halts in idle");
tsc_marked_unstable = 1;
}
+ if (lapic_detected_unstable && !lapic_marked_unstable) {
+ int i;
+ /* LAPIC could halt in idle, so notify users */
+ for_each_online_cpu(i)
+ clockevents_notify(
+ CLOCK_EVT_NOTIFY_BROADCAST_ON,
+ &i);
+ lapic_marked_unstable = 1;
+ }
local_irq_disable();
cpu = smp_processor_id();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
- &cpu);
+ if (lapic_marked_unstable)
+ clockevents_notify(
+ CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
stop_critical_timings();
__monitor((void *)¤t_thread_info()->flags, 0, 0);
__mwait(power_saving_mwait_eax, 1);
start_critical_timings();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
- &cpu);
+ if (lapic_marked_unstable)
+ clockevents_notify(
+ CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
local_irq_enable();
if (jiffies > expire_time) {
#define ACPI_MAX_LOOP_ITERATIONS 0xFFFF
+/* Maximum sleep allowed via Sleep() operator */
+
+#define ACPI_MAX_SLEEP 2000	/* 2000 millisec == two seconds */
+
/******************************************************************************
*
* ACPI Specification constants (Do not change unless the specification changes)
acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info);
-acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
-
-acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
-
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number);
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
+/*
+ * Optionally truncate I/O addresses to 16 bits. Provides compatibility
+ * with other ACPI implementations. NOTE: During ACPICA initialization,
+ * this value is set to TRUE if any Windows OSI strings have been
+ * requested by the BIOS.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE);
+
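When this flag is TRUE, port addresses are masked down to 16 bits before being
validated, so an AML access to, say, 0x0001002E is treated as an access to
port 0x002E; the matching masking in the port read/write paths appears later
in this series.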
/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
struct acpi_table_fadt acpi_gbl_FADT;
/*
* hwgpe - GPE support
*/
-acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
+ struct acpi_gpe_register_info *gpe_register_info);
+
+acpi_status
+acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action);
acpi_status
acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info);
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
{
struct acpi_gpe_register_info *gpe_register_info;
- u8 register_bit;
+ u32 register_bit;
ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
return_ACPI_STATUS(AE_NOT_EXIST);
}
- register_bit = (u8)
- (1 <<
- (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
+ register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
+ gpe_register_info);
/* Clear the wake/run bits up front */
return_ACPI_STATUS(AE_OK);
}
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_enable_gpe
- *
- * PARAMETERS: gpe_event_info - GPE to enable
- *
- * RETURN: Status
- *
- * DESCRIPTION: Hardware-enable a GPE. Always enables the GPE, regardless
- * of type or number of references.
- *
- * Note: The GPE lock should be already acquired when this function is called.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
-{
- acpi_status status;
-
-
- ACPI_FUNCTION_TRACE(ev_enable_gpe);
-
-
- /*
- * We will only allow a GPE to be enabled if it has either an
- * associated method (_Lxx/_Exx) or a handler. Otherwise, the
- * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
- * first time it fires.
- */
- if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
- return_ACPI_STATUS(AE_NO_HANDLER);
- }
-
- /* Ensure the HW enable masks are current */
-
- status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Clear the GPE (of stale events) */
-
- status = acpi_hw_clear_gpe(gpe_event_info);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Enable the requested GPE */
-
- status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_disable_gpe
- *
- * PARAMETERS: gpe_event_info - GPE to disable
- *
- * RETURN: Status
- *
- * DESCRIPTION: Hardware-disable a GPE. Always disables the requested GPE,
- * regardless of the type or number of references.
- *
- * Note: The GPE lock should be already acquired when this function is called.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_disable_gpe);
-
-
- /*
- * Note: Always disable the GPE, even if we think that that it is already
- * disabled. It is possible that the AML or some other code has enabled
- * the GPE behind our back.
- */
-
- /* Ensure the HW enable masks are current */
-
- status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Always H/W disable this GPE, even if we don't know the GPE type.
- * Simply clear the enable bit for this particular GPE, but do not
- * write out the current GPE enable mask since this may inadvertently
- * enable GPEs too early. An example is a rogue GPE that has arrived
- * during ACPICA initialization - possibly because AML or other code
- * has enabled the GPE.
- */
- status = acpi_hw_low_disable_gpe(gpe_event_info);
- return_ACPI_STATUS(status);
-}
-
/*******************************************************************************
*
return_VOID;
}
- /* Update the GPE register masks for return to enabled state */
-
- (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
-
/*
* Take a snapshot of the GPE info for this level - we copy the info to
* prevent a race condition with remove_handler/remove_block.
* Disable the GPE, so it doesn't keep firing before the method has a
* chance to run (it runs asynchronously with interrupts enabled).
*/
- status = acpi_ev_disable_gpe(gpe_event_info);
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to disable GPE[0x%2X]",
 * Disable the GPE. The GPE will remain disabled until a handler
* is installed or ACPICA is restarted.
*/
- status = acpi_ev_disable_gpe(gpe_event_info);
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to disable GPE[0x%2X]",
gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
+ gpe_number = gpe_index + gpe_block->block_base_number;
+
+ /*
+ * If the GPE has already been enabled for runtime
+ * signaling, make sure it remains enabled, but do not
+ * increment its reference counter.
+ */
+ if (gpe_event_info->runtime_count) {
+ acpi_set_gpe(gpe_device, gpe_number,
+ ACPI_GPE_ENABLE);
+ gpe_enabled_count++;
+ continue;
+ }
if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
wake_gpe_count++;
/* Enable this GPE */
- gpe_number = gpe_index + gpe_block->block_base_number;
status = acpi_enable_gpe(gpe_device, gpe_number,
ACPI_GPE_TYPE_RUNTIME);
if (ACPI_FAILURE(status)) {
handler->context = context;
handler->method_node = gpe_event_info->dispatch.method_node;
- /* Disable the GPE before installing the handler */
-
- status = acpi_ev_disable_gpe(gpe_event_info);
- if (ACPI_FAILURE (status)) {
- goto unlock_and_exit;
- }
-
/* Install the handler */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
ACPI_EXPORT_SYMBOL(acpi_enable_event)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_clear_and_enable_gpe
+ *
+ * PARAMETERS: gpe_event_info - GPE to enable
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Clear the given GPE from stale events and enable it.
+ *
+ ******************************************************************************/
+static acpi_status
+acpi_clear_and_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+{
+ acpi_status status;
+
+ /*
+ * We will only allow a GPE to be enabled if it has either an
+ * associated method (_Lxx/_Exx) or a handler. Otherwise, the
+ * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
+ * first time it fires.
+ */
+ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
+ return_ACPI_STATUS(AE_NO_HANDLER);
+ }
+
+ /* Clear the GPE (of stale events) */
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Enable the requested GPE */
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
+
+ return_ACPI_STATUS(status);
+}
+
/*******************************************************************************
*
* FUNCTION: acpi_set_gpe
switch (action) {
case ACPI_GPE_ENABLE:
- status = acpi_ev_enable_gpe(gpe_event_info);
+ status = acpi_clear_and_enable_gpe(gpe_event_info);
break;
case ACPI_GPE_DISABLE:
- status = acpi_ev_disable_gpe(gpe_event_info);
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
break;
default:
gpe_event_info->runtime_count++;
if (gpe_event_info->runtime_count == 1) {
- status = acpi_ev_enable_gpe(gpe_event_info);
+ status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ if (ACPI_SUCCESS(status)) {
+ status = acpi_clear_and_enable_gpe(gpe_event_info);
+ }
+
if (ACPI_FAILURE(status)) {
gpe_event_info->runtime_count--;
goto unlock_and_exit;
*/
gpe_event_info->wakeup_count++;
if (gpe_event_info->wakeup_count == 1) {
- (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
}
}
gpe_event_info->runtime_count--;
if (!gpe_event_info->runtime_count) {
- status = acpi_ev_disable_gpe(gpe_event_info);
+ status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ if (ACPI_SUCCESS(status)) {
+ status = acpi_hw_low_set_gpe(gpe_event_info,
+ ACPI_GPE_DISABLE);
+ }
+
if (ACPI_FAILURE(status)) {
gpe_event_info->runtime_count++;
goto unlock_and_exit;
gpe_event_info->wakeup_count--;
if (!gpe_event_info->wakeup_count) {
- (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
}
}
acpi_ex_relinquish_interpreter();
+ /*
+ * For compatibility with other ACPI implementations and to prevent
+ * accidental deep sleeps, limit the sleep time to something reasonable.
+ */
+ if (how_long > ACPI_MAX_SLEEP) {
+ how_long = ACPI_MAX_SLEEP;
+ }
+
acpi_os_sleep(how_long);
/* And now we must get the interpreter again */
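In effect, an AML request such as Sleep(30000) is shortened to ACPI_MAX_SLEEP
milliseconds before acpi_os_sleep() runs, so a firmware bug can no longer park
the interpreter for an arbitrarily long time.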
/******************************************************************************
*
- * FUNCTION: acpi_hw_low_disable_gpe
+ * FUNCTION: acpi_hw_gpe_register_bit
+ *
+ * PARAMETERS: gpe_event_info - Info block for the GPE
+ * gpe_register_info - Info block for the GPE register
+ *
+ * RETURN:      Register mask with a one in the bit position of the given GPE
+ *
+ * DESCRIPTION: Compute the GPE register mask with the single bit that
+ *              corresponds to the given GPE set.
+ *
+ ******************************************************************************/
+
+u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
+ struct acpi_gpe_register_info *gpe_register_info)
+{
+ return (u32)1 << (gpe_event_info->gpe_number -
+ gpe_register_info->base_gpe_number);
+}
+
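As a concrete example, GPE 0x13 in a register block whose base_gpe_number is
0x10 gives register_bit = 1 << 3 = 0x08, and that same mask is reused for the
enable, status and wake handling below.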
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_low_set_gpe
*
* PARAMETERS: gpe_event_info - Info block for the GPE to be disabled
+ * action - Enable or disable
*
* RETURN: Status
*
- * DESCRIPTION: Disable a single GPE in the enable register.
+ * DESCRIPTION: Enable or disable a single GPE in its enable register.
*
******************************************************************************/
-acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status
+acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
{
struct acpi_gpe_register_info *gpe_register_info;
acpi_status status;
u32 enable_mask;
+ u32 register_bit;
+
+ ACPI_FUNCTION_ENTRY();
/* Get the info block for the entire GPE register */
return (status);
}
- /* Clear just the bit that corresponds to this GPE */
+ /* Set or clear just the bit that corresponds to this GPE */
- ACPI_CLEAR_BIT(enable_mask, ((u32)1 <<
- (gpe_event_info->gpe_number -
- gpe_register_info->base_gpe_number)));
+ register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
+ gpe_register_info);
+ switch (action) {
+ case ACPI_GPE_COND_ENABLE:
+ if (!(register_bit & gpe_register_info->enable_for_run))
+ return (AE_BAD_PARAMETER);
+
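+ /* Fall through: the GPE is enabled for runtime, so set its bit */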
+ case ACPI_GPE_ENABLE:
+ ACPI_SET_BIT(enable_mask, register_bit);
+ break;
+
+ case ACPI_GPE_DISABLE:
+ ACPI_CLEAR_BIT(enable_mask, register_bit);
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO, "Invalid action\n"));
+ return (AE_BAD_PARAMETER);
+ }
/* Write the updated enable mask */
acpi_status
acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info)
{
- struct acpi_gpe_register_info *gpe_register_info;
acpi_status status;
ACPI_FUNCTION_ENTRY();
- /* Get the info block for the entire GPE register */
-
- gpe_register_info = gpe_event_info->register_info;
- if (!gpe_register_info) {
- return (AE_NOT_EXIST);
- }
-
- /* Write the entire GPE (runtime) enable register */
-
- status = acpi_hw_write(gpe_register_info->enable_for_run,
- &gpe_register_info->enable_address);
-
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);
return (status);
}
acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
{
+ struct acpi_gpe_register_info *gpe_register_info;
acpi_status status;
- u8 register_bit;
+ u32 register_bit;
ACPI_FUNCTION_ENTRY();
- register_bit = (u8)(1 <<
- (gpe_event_info->gpe_number -
- gpe_event_info->register_info->base_gpe_number));
+ /* Get the info block for the entire GPE register */
+
+ gpe_register_info = gpe_event_info->register_info;
+ if (!gpe_register_info) {
+ return (AE_NOT_EXIST);
+ }
+
+ register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
+ gpe_register_info);
/*
* Write a one to the appropriate bit in the status register to
* clear this GPE.
*/
status = acpi_hw_write(register_bit,
- &gpe_event_info->register_info->status_address);
+ &gpe_register_info->status_address);
return (status);
}
acpi_event_status * event_status)
{
u32 in_byte;
- u8 register_bit;
+ u32 register_bit;
struct acpi_gpe_register_info *gpe_register_info;
acpi_status status;
acpi_event_status local_event_status = 0;
/* Get the register bitmask for this GPE */
- register_bit = (u8)(1 <<
- (gpe_event_info->gpe_number -
- gpe_event_info->register_info->base_gpe_number));
+ register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
+ gpe_register_info);
/* GPE currently enabled? (enabled for runtime?) */
u32 one_byte;
u32 i;
+ /* Truncate address to 16 bits if requested */
+
+ if (acpi_gbl_truncate_io_addresses) {
+ address &= ACPI_UINT16_MAX;
+ }
+
/* Validate the entire request and perform the I/O */
status = acpi_hw_validate_io_request(address, width);
acpi_status status;
u32 i;
+ /* Truncate address to 16 bits if requested */
+
+ if (acpi_gbl_truncate_io_addresses) {
+ address &= ACPI_UINT16_MAX;
+ }
+
/* Validate the entire request and perform the I/O */
status = acpi_hw_validate_io_request(address, width);
acpi_ns_init_one_device, NULL, &info,
NULL);
+ /*
+ * Any _OSI requests should be completed by now. If the BIOS has
+ * requested any Windows OSI strings, we will always truncate
+ * I/O addresses to 16 bits -- for Windows compatibility.
+ */
+ if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000) {
+ acpi_gbl_truncate_io_addresses = TRUE;
+ }
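Purely for illustration, the truncation applied in the hwvalid read/write hunks above is a plain 16-bit mask; a standalone sketch with an invented port address:

#include <stdint.h>
#include <stdio.h>

#define ACPI_UINT16_MAX 0xFFFFu

int main(void)
{
	uint64_t address = 0x00010070;	/* invented I/O address above 64K */

	/* same masking the read/write paths apply when truncation is enabled */
	address &= ACPI_UINT16_MAX;
	printf("0x%llx\n", (unsigned long long)address);	/* prints 0x70 */
	return 0;
}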
+
ACPI_FREE(info.evaluate_info);
if (ACPI_FAILURE(status)) {
goto error_exit;
},
},
{
+ .callback = dmi_disable_osi_vista,
+ .ident = "VGN-NS50B_L",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS50B_L"),
+ },
+ },
+ {
.callback = dmi_disable_osi_win7,
.ident = "ASUS K50IJ",
.matches = {
/* Button's GPE is run-wake GPE */
acpi_enable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number,
- ACPI_GPE_TYPE_WAKE_RUN);
+ ACPI_GPE_TYPE_RUNTIME);
device->wakeup.run_wake_count++;
device->wakeup.state.enabled = 1;
}
if (device->wakeup.flags.valid) {
acpi_disable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number,
- ACPI_GPE_TYPE_WAKE_RUN);
+ ACPI_GPE_TYPE_RUNTIME);
device->wakeup.run_wake_count--;
device->wakeup.state.enabled = 0;
}
{
int result = 0;
-
#ifdef CONFIG_ACPI_PROCFS
acpi_fan_dir = proc_mkdir(ACPI_FAN_CLASS, acpi_root_dir);
if (!acpi_fan_dir)
result = acpi_bus_register_driver(&acpi_fan_driver);
if (result < 0) {
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
+#endif
return -ENODEV;
}
return 0;
}
+#ifdef CONFIG_SMP
+ if (pr->id >= setup_max_cpus && pr->id != 0)
+ return 0;
+#endif
+
BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
/*
{
int error = acpi_sleep_prepare(acpi_target_sleep_state);
+ suspend_nvs_save();
+
if (error)
acpi_target_sleep_state = ACPI_STATE_S0;
return error;
{
u32 acpi_state = acpi_target_sleep_state;
+ suspend_nvs_free();
+ acpi_ec_unblock_transactions();
+
if (acpi_state == ACPI_STATE_S0)
return;
u32 acpi_state = acpi_suspend_states[pm_state];
int error = 0;
+ error = suspend_nvs_alloc();
+
+ if (error)
+ return error;
+
if (sleep_states[acpi_state]) {
acpi_target_sleep_state = acpi_state;
acpi_sleep_tts_switch(acpi_target_sleep_state);
if (acpi_state == ACPI_STATE_S3)
acpi_restore_state_mem();
+ suspend_nvs_restore();
+
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
static void acpi_suspend_finish(void)
{
- acpi_ec_unblock_transactions();
acpi_pm_finish();
}
{
int error;
- error = s4_no_nvs ? 0 : hibernate_nvs_alloc();
+ error = s4_no_nvs ? 0 : suspend_nvs_alloc();
if (!error) {
acpi_target_sleep_state = ACPI_STATE_S4;
acpi_sleep_tts_switch(acpi_target_sleep_state);
int error = acpi_pm_prepare();
if (!error)
- hibernate_nvs_save();
+ suspend_nvs_save();
return error;
}
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
-static void acpi_hibernation_finish(void)
-{
- hibernate_nvs_free();
- acpi_ec_unblock_transactions();
- acpi_pm_finish();
-}
-
static void acpi_hibernation_leave(void)
{
/*
panic("ACPI S4 hardware signature mismatch");
}
/* Restore the NVS memory area */
- hibernate_nvs_restore();
+ suspend_nvs_restore();
/* Allow EC transactions to happen. */
acpi_ec_unblock_transactions_early();
}
.begin = acpi_hibernation_begin,
.end = acpi_pm_end,
.pre_snapshot = acpi_hibernation_pre_snapshot,
- .finish = acpi_hibernation_finish,
+ .finish = acpi_pm_finish,
.prepare = acpi_pm_prepare,
.enter = acpi_hibernation_enter,
.leave = acpi_hibernation_leave,
if (!error) {
if (!s4_no_nvs)
- error = hibernate_nvs_alloc();
+ error = suspend_nvs_alloc();
if (!error)
acpi_target_sleep_state = ACPI_STATE_S4;
}
static int acpi_hibernation_pre_snapshot_old(void)
{
acpi_pm_freeze();
- hibernate_nvs_save();
+ suspend_nvs_save();
return 0;
}
.begin = acpi_hibernation_begin_old,
.end = acpi_pm_end,
.pre_snapshot = acpi_hibernation_pre_snapshot_old,
- .finish = acpi_hibernation_finish,
.prepare = acpi_pm_freeze,
+ .finish = acpi_pm_finish,
.enter = acpi_hibernation_enter,
.leave = acpi_hibernation_leave,
.pre_restore = acpi_pm_freeze,
if (index < num_gpes) {
if (!strcmp(buf, "disable\n") &&
(status & ACPI_EVENT_FLAG_ENABLED))
- result = acpi_set_gpe(handle, index, ACPI_GPE_DISABLE);
+ result = acpi_disable_gpe(handle, index,
+ ACPI_GPE_TYPE_RUNTIME);
else if (!strcmp(buf, "enable\n") &&
!(status & ACPI_EVENT_FLAG_ENABLED))
- result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE);
+ result = acpi_enable_gpe(handle, index,
+ ACPI_GPE_TYPE_RUNTIME);
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_gpe(handle, index);
struct acpi_device *dev =
container_of(node, struct acpi_device, wakeup_list);
- if (!dev->wakeup.flags.valid)
- continue;
-
- if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count)
+ if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled
|| sleep_state > (u32) dev->wakeup.sleep_state)
continue;
/* The wake-up power should have been enabled already. */
- acpi_set_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
- ACPI_GPE_ENABLE);
+ acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+ ACPI_GPE_TYPE_WAKE);
}
}
|| (sleep_state > (u32) dev->wakeup.sleep_state))
continue;
+ acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+ ACPI_GPE_TYPE_WAKE);
acpi_disable_wakeup_device_power(dev);
}
}
struct acpi_device *dev = container_of(node,
struct acpi_device,
wakeup_list);
- /* In case user doesn't load button driver */
- if (!dev->wakeup.flags.always_enabled ||
- dev->wakeup.state.enabled)
- continue;
- acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
- ACPI_GPE_TYPE_WAKE);
- dev->wakeup.state.enabled = 1;
+ if (dev->wakeup.flags.always_enabled)
+ dev->wakeup.state.enabled = 1;
}
mutex_unlock(&acpi_device_lock);
return 0;
case KEY_RIGHTALT:
if (value)
sysrq_alt = code;
- else if (sysrq_down && code == sysrq_alt_use)
- sysrq_down = false;
+ else {
+ if (sysrq_down && code == sysrq_alt_use)
+ sysrq_down = false;
+
+ sysrq_alt = 0;
+ }
break;
case KEY_SYSRQ:
u8 algorithm[4];
u8 encscheme[2];
u8 sigscheme[2];
+ __be32 paramsize;
u8 parameters[12]; /*assuming RSA*/
__be32 keysize;
u8 modulus[256];
clk_disable(p->clk);
/* TODO: calculate good shift from rate and counter bit width */
- cs->shift = 10;
+ cs->shift = 0;
cs->mult = clocksource_hz2mult(p->rate, cs->shift);
dev_info(&p->pdev->dev, "used as clock source\n");
iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
iattr->s_attr.dev_attr.show = show_label;
iattr->s_attr.index = k;
+ sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
res = device_create_file(&pdev->dev,
&iattr->s_attr.dev_attr);
if (res)
iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
iattr->s_attr.dev_attr.show = show_amb_temp;
iattr->s_attr.index = k;
+ sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
res = device_create_file(&pdev->dev,
&iattr->s_attr.dev_attr);
if (res)
iattr->s_attr.dev_attr.show = show_amb_min;
iattr->s_attr.dev_attr.store = store_amb_min;
iattr->s_attr.index = k;
+ sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
res = device_create_file(&pdev->dev,
&iattr->s_attr.dev_attr);
if (res)
iattr->s_attr.dev_attr.show = show_amb_mid;
iattr->s_attr.dev_attr.store = store_amb_mid;
iattr->s_attr.index = k;
+ sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
res = device_create_file(&pdev->dev,
&iattr->s_attr.dev_attr);
if (res)
iattr->s_attr.dev_attr.show = show_amb_max;
iattr->s_attr.dev_attr.store = store_amb_max;
iattr->s_attr.index = k;
+ sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
res = device_create_file(&pdev->dev,
&iattr->s_attr.dev_attr);
if (res)
iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
iattr->s_attr.dev_attr.show = show_amb_alarm;
iattr->s_attr.index = k;
+ sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
res = device_create_file(&pdev->dev,
&iattr->s_attr.dev_attr);
if (res)
if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
return false;
- /* Differentiate between AM2+ (bad) and AM3 (good) */
+ /* DDR3 memory implies socket AM3, which is good */
pci_bus_read_config_dword(pdev->bus,
PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
 REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
- return !(reg_dram_cfg & DDR3_MODE);
+ if (reg_dram_cfg & DDR3_MODE)
+ return false;
+
+ /*
+ * Unfortunately it is possible to run a socket AM3 CPU with DDR2
+ * memory. We blacklist all the cores which do exist in socket AM2+
+ * format. It still isn't perfect, as RB-C2 cores exist in both AM2+
+ * and AM3 formats, but that's the best we can do.
+ */
+ return boot_cpu_data.x86_model < 4 ||
+ (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
}
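To make the boundary of the blacklist above concrete, a tiny standalone sketch of the same model/stepping test; the helper name is invented and the sample values are chosen only to show which side of the check they fall on:

#include <stdbool.h>
#include <stdio.h>

/* Same predicate as the return statement above: models below 4, and model 4
 * up to stepping 2, are treated as possibly socket AM2+ parts. */
static bool maybe_am2plus(unsigned int model, unsigned int stepping)
{
	return model < 4 || (model == 4 && stepping <= 2);
}

int main(void)
{
	printf("%d %d\n", maybe_am2plus(4, 2), maybe_am2plus(4, 3)); /* 1 0 */
	return 0;
}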
static int __devinit k10temp_probe(struct pci_dev *pdev,
int temp;
struct k8temp_data *data = k8temp_update_device(dev);
- if (data->swap_core_select)
+ if (data->swap_core_select && (data->sensorsp & SEL_CORE))
core = core ? 0 : 1;
temp = TEMP_FROM_REG(data->temp[core][place]) + data->temp_offset;
module will be called atakbd.
config KEYBOARD_ATKBD
- tristate "AT keyboard" if EMBEDDED || !X86
+ tristate "AT keyboard" if EMBEDDED || !X86 || X86_MRST
default y
select SERIO
select SERIO_LIBPS2
unsigned char nextstate = read_state(lp);
if (lp->laststate != nextstate) {
- int key_down = nextstate <= ARRAY_SIZE(lp->btncode);
+ int key_down = nextstate < ARRAY_SIZE(lp->btncode);
unsigned short keycode = key_down ?
lp->btncode[nextstate] : lp->btncode[lp->laststate];
tristate "i8042 PC Keyboard controller" if EMBEDDED || !X86
default y
depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
- (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN
+ (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !X86_MRST
help
i8042 is the chip over which the standard AT keyboard and PS/2
mouse are connected to the computer. If you use these devices,
(data[4] << 20) + (data[5] << 12) +
(data[6] << 4) + (data[7] >> 4);
- wacom->id[idx] = (data[2] << 4) | (data[3] >> 4);
+ wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
+ ((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);
- switch (wacom->id[idx]) {
+ switch (wacom->id[idx] & 0xfffff) {
case 0x812: /* Inking pen */
case 0x801: /* Intuos3 Inking pen */
- case 0x20802: /* Intuos4 Classic Pen */
+ case 0x20802: /* Intuos4 Inking Pen */
case 0x012:
wacom->tool[idx] = BTN_TOOL_PENCIL;
break;
input_report_abs(input, ABS_RX, ((data[1] & 0x1f) << 8) | data[2]);
input_report_abs(input, ABS_RY, ((data[3] & 0x1f) << 8) | data[4]);
- if ((data[5] & 0x1f) | (data[6] & 0x1f) | (data[1] & 0x1f) |
+ if ((data[5] & 0x1f) | data[6] | (data[1] & 0x1f) |
data[2] | (data[3] & 0x1f) | data[4] | data[8] |
(data[7] & 0x01)) {
input_report_key(input, wacom->tool[1], 1);
return -EINVAL;
}
+ spi->bits_per_word = 16;
+ err = spi_setup(spi);
+ if (err) {
+ dev_dbg(&spi->dev, "spi master doesn't support 16 bits/word\n");
+ return err;
+ }
+
ts = kzalloc(sizeof(struct ad7877), GFP_KERNEL);
input_dev = input_allocate_device();
if (!ts || !input_dev) {
static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
-#define dr32(reg) readl(de->regs + (reg))
-#define dw32(reg,val) writel((val), de->regs + (reg))
+#define dr32(reg) ioread32(de->regs + (reg))
+#define dw32(reg, val) iowrite32((val), de->regs + (reg))
static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
int value, boguscnt = 100000;
do {
value = dr32(ROMCmd);
+ rmb();
} while (value < 0 && --boguscnt > 0);
de->dev->dev_addr[i] = value;
udelay(1);
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/
-static int __initdata dmar_map_gfx = 1;
+static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
}
}
if (found) {
+ spin_unlock_irqrestore(&device_domain_lock, flags);
free_devinfo_mem(info);
domain_exit(domain);
domain = found;
} else {
list_add(&info->link, &domain->devices);
list_add(&info->global, &device_domain_list);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
}
- spin_unlock_irqrestore(&device_domain_lock, flags);
}
found_domain:
pte = dmar_domain->pgd;
if (dma_pte_present(pte)) {
free_pgtable_page(dmar_domain->pgd);
- dmar_domain->pgd = (struct dma_pte *)dma_pte_addr(pte);
+ dmar_domain->pgd = (struct dma_pte *)
+ phys_to_virt(dma_pte_addr(pte));
}
dmar_domain->agaw--;
}
*/
printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
rwbf_quirk = 1;
+
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
+ if (dev->revision == 0x07) {
+ printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
+ dmar_map_gfx = 0;
+ }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}
}
+EXPORT_SYMBOL_GPL(pci_msi_off);
#ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
* being registered. Consequently, the interrupt-based PCIe PME signaling will
* not be used by any PCIe root ports in that case.
*/
-static bool pcie_pme_disabled;
+static bool pcie_pme_disabled = true;
/*
* The PCI Express Base Specification 2.0, Section 6.1.8, states the following:
static int __init pcie_pme_setup(char *str)
{
- if (!strcmp(str, "off"))
- pcie_pme_disabled = true;
- else if (!strcmp(str, "force"))
+ if (!strncmp(str, "auto", 4))
+ pcie_pme_disabled = false;
+ else if (!strncmp(str, "force", 5))
pcie_pme_force_enable = true;
- else if (!strcmp(str, "nomsi"))
- pcie_pme_msi_disabled = true;
+
+ str = strchr(str, ',');
+ if (str) {
+ str++;
+ str += strspn(str, " \t");
+ if (*str && !strcmp(str, "nomsi"))
+ pcie_pme_msi_disabled = true;
+ }
+
return 1;
}
__setup("pcie_pme=", pcie_pme_setup);
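A rough userspace re-creation of the parsing done by pcie_pme_setup() above, only to show how a sample option string such as "auto,nomsi" (invented for this sketch) is taken apart; it is not the kernel code itself:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool pme_disabled = true;	/* mirrors the new default above */
static bool pme_force_enable;
static bool pme_msi_disabled;

static void parse_pcie_pme(const char *str)
{
	if (!strncmp(str, "auto", 4))
		pme_disabled = false;
	else if (!strncmp(str, "force", 5))
		pme_force_enable = true;

	str = strchr(str, ',');
	if (str) {
		str++;
		str += strspn(str, " \t");
		if (*str && !strcmp(str, "nomsi"))
			pme_msi_disabled = true;
	}
}

int main(void)
{
	parse_pcie_pme("auto,nomsi");
	printf("disabled=%d force=%d nomsi=%d\n",
	       pme_disabled, pme_force_enable, pme_msi_disabled); /* 0 0 1 */
	return 0;
}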
/* PM code called only in internal controller mode */
static void omap_lcdc_suspend(void)
{
- if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
- disable_controller();
- omap_stop_lcd_dma();
- }
+ omap_lcdc_set_update_mode(OMAPFB_UPDATE_DISABLED);
}
static void omap_lcdc_resume(void)
{
- if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
- setup_regs();
- load_palette();
- setup_lcd_dma();
- set_load_mode(OMAP_LCDC_LOAD_FRAME);
- enable_irqs(OMAP_LCDC_IRQ_DONE);
- enable_controller();
- }
+ omap_lcdc_set_update_mode(OMAPFB_AUTO_UPDATE);
}
static void omap_lcdc_get_caps(int plane, struct omapfb_caps *caps)
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include "omapfb.h"
#include "dispc.h"
static int rfbi_get_clocks(void)
{
- rfbi.dss_ick = clk_get(&dispc.fbdev->dssdev->dev, "ick");
+ rfbi.dss_ick = clk_get(&rfbi.fbdev->dssdev->dev, "ick");
if (IS_ERR(rfbi.dss_ick)) {
dev_err(rfbi.fbdev->dev, "can't get ick\n");
return PTR_ERR(rfbi.dss_ick);
}
- rfbi.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "dss1_fck");
+ rfbi.dss1_fck = clk_get(&rfbi.fbdev->dssdev->dev, "dss1_fck");
if (IS_ERR(rfbi.dss1_fck)) {
dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n");
clk_put(rfbi.dss_ick);
INIT_LIST_HEAD(&vp_dev->virtqueues);
spin_lock_init(&vp_dev->lock);
+ /* Disable MSI/MSIX to bring device to a known good state. */
+ pci_msi_off(pci_dev);
+
/* enable the device */
err = pci_enable_device(pci_dev);
if (err)
desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
if (!desc)
- return vq->vring.num;
+ return -ENOMEM;
/* Transfer entries from the sg list into the indirect page */
for (i = 0; i < out; i++) {
} else {
wdt->users = 0;
}
- wdt->miscdev.minor = WATCHDOG_MINOR;
- wdt->miscdev.name = "watchdog";
- wdt->miscdev.fops = &at32_wdt_fops;
+
+ wdt->miscdev.minor = WATCHDOG_MINOR;
+ wdt->miscdev.name = "watchdog";
+ wdt->miscdev.fops = &at32_wdt_fops;
+ wdt->miscdev.parent = &pdev->dev;
+
+ platform_set_drvdata(pdev, wdt);
if (at32_wdt_settimeout(timeout)) {
at32_wdt_settimeout(TIMEOUT_DEFAULT);
ret = misc_register(&wdt->miscdev);
if (ret) {
dev_dbg(&pdev->dev, "failed to register wdt miscdev\n");
- goto err_iounmap;
+ goto err_register;
}
- platform_set_drvdata(pdev, wdt);
- wdt->miscdev.parent = &pdev->dev;
dev_info(&pdev->dev,
"AT32AP700X WDT at 0x%p, timeout %d sec (nowayout=%d)\n",
wdt->regs, wdt->timeout, nowayout);
return 0;
+err_register:
+ platform_set_drvdata(pdev, NULL);
err_iounmap:
iounmap(wdt->regs);
err_free:
}
static struct platform_driver imx2_wdt_driver = {
- .probe = imx2_wdt_probe,
.remove = __exit_p(imx2_wdt_remove),
.shutdown = imx2_wdt_shutdown,
.driver = {
return 0;
}
+void cifs_drop_inode(struct inode *inode)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+ return generic_drop_inode(inode);
+
+ return generic_delete_inode(inode);
+}
+
static const struct super_operations cifs_super_ops = {
.put_super = cifs_put_super,
.statfs = cifs_statfs,
.alloc_inode = cifs_alloc_inode,
.destroy_inode = cifs_destroy_inode,
-/* .drop_inode = generic_delete_inode,
- .delete_inode = cifs_delete_inode, */ /* Do not need above two
- functions unless later we add lazy close of inodes or unless the
+ .drop_inode = cifs_drop_inode,
+/* .delete_inode = cifs_delete_inode, */ /* Do not need above
+ function unless later we add lazy close of inodes or unless the
kernel forgets to call us with the same number of releases (closes)
as opens */
.show_options = cifs_show_options,
__u16 fileHandle, struct file *file,
struct vfsmount *mnt, unsigned int oflags);
extern int cifs_posix_open(char *full_path, struct inode **pinode,
- struct vfsmount *mnt,
struct super_block *sb,
int mode, int oflags,
__u32 *poplock, __u16 *pnetfid, int xid);
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/mount.h>
+#include <linux/file.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
}
write_unlock(&GlobalSMBSeslock);
+ file->private_data = pCifsFile;
+
return pCifsFile;
}
int cifs_posix_open(char *full_path, struct inode **pinode,
- struct vfsmount *mnt, struct super_block *sb,
- int mode, int oflags,
+ struct super_block *sb, int mode, int oflags,
__u32 *poplock, __u16 *pnetfid, int xid)
{
int rc;
cifs_fattr_to_inode(*pinode, &fattr);
}
- /*
- * cifs_fill_filedata() takes care of setting cifsFileInfo pointer to
- * file->private_data.
- */
- if (mnt) {
- struct cifsFileInfo *pfile_info;
-
- pfile_info = cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt,
- oflags);
- if (pfile_info == NULL)
- rc = -ENOMEM;
- }
-
posix_open_ret:
kfree(presp_data);
return rc;
int create_options = CREATE_NOT_DIR;
__u32 oplock = 0;
int oflags;
- bool posix_create = false;
/*
* BB below access is probably too much for mknod to request
* but we have to do query and setpathinfo so requesting
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = cifs_posix_open(full_path, &newinode,
- nd ? nd->path.mnt : NULL,
inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
/* EIO could indicate that (posix open) operation is not
supported, despite what server claimed in capability
handled in posix open */
if (rc == 0) {
- posix_create = true;
if (newinode == NULL) /* query inode info */
goto cifs_create_get_file_info;
else /* success, no need to query */
else
cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
- /* nfsd case - nfs srv does not set nd */
- if ((nd == NULL) || (!(nd->flags & LOOKUP_OPEN))) {
- /* mknod case - do not leave file open */
- CIFSSMBClose(xid, tcon, fileHandle);
- } else if (!(posix_create) && (newinode)) {
+ if (newinode && nd && (nd->flags & LOOKUP_OPEN)) {
struct cifsFileInfo *pfile_info;
- /*
- * cifs_fill_filedata() takes care of setting cifsFileInfo
- * pointer to file->private_data.
- */
- pfile_info = cifs_new_fileinfo(newinode, fileHandle, NULL,
+ struct file *filp;
+
+ filp = lookup_instantiate_filp(nd, direntry, generic_file_open);
+ if (IS_ERR(filp)) {
+ rc = PTR_ERR(filp);
+ CIFSSMBClose(xid, tcon, fileHandle);
+ goto cifs_create_out;
+ }
+
+ pfile_info = cifs_new_fileinfo(newinode, fileHandle, filp,
nd->path.mnt, oflags);
- if (pfile_info == NULL)
+ if (pfile_info == NULL) {
+ fput(filp);
+ CIFSSMBClose(xid, tcon, fileHandle);
rc = -ENOMEM;
+ }
+ } else {
+ CIFSSMBClose(xid, tcon, fileHandle);
}
+
cifs_create_out:
kfree(buf);
kfree(full_path);
bool posix_open = false;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
+ struct cifsFileInfo *cfile;
struct inode *newInode = NULL;
char *full_path = NULL;
struct file *filp;
if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
(nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
(nd->intent.open.flags & O_CREAT)) {
- rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
+ rc = cifs_posix_open(full_path, &newInode,
parent_dir_inode->i_sb,
nd->intent.open.create_mode,
nd->intent.open.flags, &oplock,
else
direntry->d_op = &cifs_dentry_ops;
d_add(direntry, newInode);
- if (posix_open)
- filp = lookup_instantiate_filp(nd, direntry, NULL);
+ if (posix_open) {
+ filp = lookup_instantiate_filp(nd, direntry,
+ generic_file_open);
+ if (IS_ERR(filp)) {
+ rc = PTR_ERR(filp);
+ CIFSSMBClose(xid, pTcon, fileHandle);
+ goto lookup_out;
+ }
+
+ cfile = cifs_new_fileinfo(newInode, fileHandle, filp,
+ nd->path.mnt,
+ nd->intent.open.flags);
+ if (cfile == NULL) {
+ fput(filp);
+ CIFSSMBClose(xid, pTcon, fileHandle);
+ rc = -ENOMEM;
+ goto lookup_out;
+ }
+ }
/* since paths are not looked up by component - the parent
directories are presumed to be good here */
renew_parental_timestamps(direntry);
is a common return code */
}
+lookup_out:
kfree(full_path);
FreeXid(xid);
return ERR_PTR(rc);
return 0;
}
-static struct cifsFileInfo *
-cifs_fill_filedata(struct file *file)
-{
- struct list_head *tmp;
- struct cifsFileInfo *pCifsFile = NULL;
- struct cifsInodeInfo *pCifsInode = NULL;
-
- /* search inode for this file and fill in file->private_data */
- pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
- read_lock(&GlobalSMBSeslock);
- list_for_each(tmp, &pCifsInode->openFileList) {
- pCifsFile = list_entry(tmp, struct cifsFileInfo, flist);
- if ((pCifsFile->pfile == NULL) &&
- (pCifsFile->pid == current->tgid)) {
- /* mode set in cifs_create */
-
- /* needed for writepage */
- pCifsFile->pfile = file;
- file->private_data = pCifsFile;
- break;
- }
- }
- read_unlock(&GlobalSMBSeslock);
-
- if (file->private_data != NULL) {
- return pCifsFile;
- } else if ((file->f_flags & O_CREAT) && (file->f_flags & O_EXCL))
- cERROR(1, "could not find file instance for "
- "new file %p", file);
- return NULL;
-}
-
/* all arguments to this function must be checked for validity in caller */
-static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
- struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
+static inline int cifs_open_inode_helper(struct inode *inode,
struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
char *full_path, int xid)
{
+ struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
struct timespec temp;
int rc;
/* if not oplocked, invalidate inode pages if mtime or file
size changed */
temp = cifs_NTtimeToUnix(buf->LastWriteTime);
- if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
- (file->f_path.dentry->d_inode->i_size ==
+ if (timespec_equal(&inode->i_mtime, &temp) &&
+ (inode->i_size ==
(loff_t)le64_to_cpu(buf->EndOfFile))) {
cFYI(1, "inode unchanged on server");
} else {
- if (file->f_path.dentry->d_inode->i_mapping) {
+ if (inode->i_mapping) {
/* BB no need to lock inode until after invalidate
since namei code should already have it locked? */
- rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
+ rc = filemap_write_and_wait(inode->i_mapping);
if (rc != 0)
- CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
+ pCifsInode->write_behind_rc = rc;
}
cFYI(1, "invalidating remote inode since open detected it "
"changed");
- invalidate_remote_inode(file->f_path.dentry->d_inode);
+ invalidate_remote_inode(inode);
}
client_can_cache:
if (pTcon->unix_ext)
- rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
- full_path, inode->i_sb, xid);
+ rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
+ xid);
else
- rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
- full_path, buf, inode->i_sb, xid, NULL);
+ rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
+ xid, NULL);
if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
- cFYI(1, "Exclusive Oplock granted on inode %p",
- file->f_path.dentry->d_inode);
+ cFYI(1, "Exclusive Oplock granted on inode %p", inode);
} else if ((*oplock & 0xF) == OPLOCK_READ)
pCifsInode->clientCanCacheRead = true;
__u32 oplock;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *tcon;
- struct cifsFileInfo *pCifsFile;
+ struct cifsFileInfo *pCifsFile = NULL;
struct cifsInodeInfo *pCifsInode;
char *full_path = NULL;
int desiredAccess;
tcon = cifs_sb->tcon;
pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
- pCifsFile = cifs_fill_filedata(file);
- if (pCifsFile) {
- rc = 0;
- FreeXid(xid);
- return rc;
- }
full_path = build_path_from_dentry(file->f_path.dentry);
if (full_path == NULL) {
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
oflags |= SMB_O_CREAT;
/* can not refresh inode info since size could be stale */
- rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
- inode->i_sb,
+ rc = cifs_posix_open(full_path, &inode, inode->i_sb,
cifs_sb->mnt_file_mode /* ignored */,
oflags, &oplock, &netfid, xid);
if (rc == 0) {
/* no need for special case handling of setting mode
on read only files needed here */
- pCifsFile = cifs_fill_filedata(file);
- cifs_posix_open_inode_helper(inode, file, pCifsInode,
- oplock, netfid);
+ rc = cifs_posix_open_inode_helper(inode, file,
+ pCifsInode, oplock, netfid);
+ if (rc != 0) {
+ CIFSSMBClose(xid, tcon, netfid);
+ goto out;
+ }
+
+ pCifsFile = cifs_new_fileinfo(inode, netfid, file,
+ file->f_path.mnt,
+ oflags);
+ if (pCifsFile == NULL) {
+ CIFSSMBClose(xid, tcon, netfid);
+ rc = -ENOMEM;
+ }
goto out;
} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
if (tcon->ses->serverNOS)
goto out;
}
+ rc = cifs_open_inode_helper(inode, tcon, &oplock, buf, full_path, xid);
+ if (rc != 0)
+ goto out;
+
pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
file->f_flags);
- file->private_data = pCifsFile;
- if (file->private_data == NULL) {
+ if (pCifsFile == NULL) {
rc = -ENOMEM;
goto out;
}
- rc = cifs_open_inode_helper(inode, file, pCifsInode, pCifsFile, tcon,
- &oplock, buf, full_path, xid);
-
if (oplock & CIFS_CREATE_ACTION) {
/* time to set mode which we can not set earlier due to
problems creating new read-only files */
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
/* can not refresh inode info since size could be stale */
- rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
- inode->i_sb,
+ rc = cifs_posix_open(full_path, NULL, inode->i_sb,
cifs_sb->mnt_file_mode /* ignored */,
oflags, &oplock, &netfid, xid);
if (rc == 0) {
if (rc == 0 || rc != -ETXTBSY)
return rc;
+ /* open-file renames don't work across directories */
+ if (to_dentry->d_parent != from_dentry->d_parent)
+ return rc;
+
/* open the file to be renamed -- we need DELETE perms */
rc = CIFSSMBOpen(xid, pTcon, fromPath, FILE_OPEN, DELETE,
CREATE_NOT_DIR, &srcfid, &oplock, NULL,
/* calculate session key */
setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
- if (first_time) /* should this be moved into common code
- with similar ntlmv2 path? */
- /* cifs_calculate_ntlmv2_mac_key(ses->server->mac_signing_key,
- response BB FIXME, v2_sess_key); */
-
- /* copy session key */
-
- /* memcpy(bcc_ptr, (char *)ntlm_session_key,LM2_SESS_KEY_SIZE);
- bcc_ptr += LM2_SESS_KEY_SIZE; */
+ /* FIXME: calculate MAC key */
memcpy(bcc_ptr, (char *)v2_sess_key,
sizeof(struct ntlmv2_resp));
bcc_ptr += sizeof(struct ntlmv2_resp);
return error;
else {
inode->i_mode = mode;
+ inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
if (error == 0)
acl = NULL;
return error;
else {
inode->i_mode = mode;
+ inode->i_ctime = CURRENT_TIME_SEC;
ext3_mark_inode_dirty(handle, inode);
if (error == 0)
acl = NULL;
for (pp = np->properties; pp != NULL; pp = pp->next) {
p = pp->name;
+ if (strchr(p, '/'))
+ continue;
+
if (duplicate_name(de, p))
p = fixup_name(np, de, p);
"AE_NO_GLOBAL_LOCK",
"AE_ABORT_METHOD",
"AE_SAME_HANDLER",
- "AE_WAKE_ONLY_GPE",
+ "AE_NO_HANDLER",
"AE_OWNER_ID_LIMIT"
};
extern u32 acpi_gbl_trace_flags;
extern u8 acpi_gbl_enable_aml_debug_object;
extern u8 acpi_gbl_copy_dsdt_locally;
+extern u8 acpi_gbl_truncate_io_addresses;
extern u32 acpi_current_gpe_count;
extern struct acpi_table_fadt acpi_gbl_FADT;
#define ACPI_GPE_MAX 0xFF
#define ACPI_NUM_GPE 256
-/* Actions for acpi_set_gpe */
+/* Actions for acpi_set_gpe and acpi_hw_low_set_gpe */
#define ACPI_GPE_ENABLE 0
#define ACPI_GPE_DISABLE 1
+#define ACPI_GPE_COND_ENABLE 2
/* gpe_types for acpi_enable_gpe and acpi_disable_gpe */
static inline bool system_entering_hibernation(void) { return false; }
#endif /* CONFIG_HIBERNATION */
-#ifdef CONFIG_HIBERNATION_NVS
-extern int hibernate_nvs_register(unsigned long start, unsigned long size);
-extern int hibernate_nvs_alloc(void);
-extern void hibernate_nvs_free(void);
-extern void hibernate_nvs_save(void);
-extern void hibernate_nvs_restore(void);
-#else /* CONFIG_HIBERNATION_NVS */
-static inline int hibernate_nvs_register(unsigned long a, unsigned long b)
+#ifdef CONFIG_SUSPEND_NVS
+extern int suspend_nvs_register(unsigned long start, unsigned long size);
+extern int suspend_nvs_alloc(void);
+extern void suspend_nvs_free(void);
+extern void suspend_nvs_save(void);
+extern void suspend_nvs_restore(void);
+#else /* CONFIG_SUSPEND_NVS */
+static inline int suspend_nvs_register(unsigned long a, unsigned long b)
{
return 0;
}
-static inline int hibernate_nvs_alloc(void) { return 0; }
-static inline void hibernate_nvs_free(void) {}
-static inline void hibernate_nvs_save(void) {}
-static inline void hibernate_nvs_restore(void) {}
-#endif /* CONFIG_HIBERNATION_NVS */
+static inline int suspend_nvs_alloc(void) { return 0; }
+static inline void suspend_nvs_free(void) {}
+static inline void suspend_nvs_save(void) {}
+static inline void suspend_nvs_restore(void) {}
+#endif /* CONFIG_SUSPEND_NVS */
#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
#ifdef CONFIG_SMP
/* Setup configured maximum number of CPUs to activate */
-unsigned int __initdata setup_max_cpus = NR_CPUS;
+unsigned int setup_max_cpus = NR_CPUS;
+EXPORT_SYMBOL(setup_max_cpus);
+
/*
* Setup routine for controlling SMP activation
depends on PM_ADVANCED_DEBUG
default n
+config SUSPEND_NVS
+ bool
+
config SUSPEND
bool "Suspend to RAM and standby"
depends on PM && ARCH_SUSPEND_POSSIBLE
+ select SUSPEND_NVS if HAS_IOMEM
default y
---help---
Allow the system to enter sleep states in which main memory is
Turning OFF this setting is NOT recommended! If in doubt, say Y.
-config HIBERNATION_NVS
- bool
-
config HIBERNATION
bool "Hibernation (aka 'suspend to disk')"
depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
- select HIBERNATION_NVS if HAS_IOMEM
+ select SUSPEND_NVS if HAS_IOMEM
---help---
Enable the suspend to disk (STD) functionality, which is usually
called "hibernation" in user interfaces. STD checkpoints the
obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \
block_io.o
-obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o
+obj-$(CONFIG_SUSPEND_NVS) += nvs.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
/*
* Platforms, like ACPI, may want us to save some memory used by them during
- * hibernation and to restore the contents of this memory during the subsequent
+ * suspend and to restore the contents of this memory during the subsequent
* resume. The code below implements a mechanism allowing us to do that.
*/
static LIST_HEAD(nvs_list);
/**
- * hibernate_nvs_register - register platform NVS memory region to save
+ * suspend_nvs_register - register platform NVS memory region to save
* @start - physical address of the region
* @size - size of the region
*
* things so that the data from page-aligned addresses in this region will
* be copied into separate RAM pages.
*/
-int hibernate_nvs_register(unsigned long start, unsigned long size)
+int suspend_nvs_register(unsigned long start, unsigned long size)
{
struct nvs_page *entry, *next;
}
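As a hedged usage sketch only: hypothetical platform code asking the renamed interface to preserve a firmware NVS region across suspend. The function name, address, and size below are invented for illustration.

#include <linux/init.h>
#include <linux/suspend.h>

/* Hypothetical example, not part of the patch: register a firmware NVS
 * region so suspend_nvs_save()/suspend_nvs_restore() preserve its contents. */
static int __init example_register_nvs(void)
{
	return suspend_nvs_register(0xdf000000UL, 0x10000UL);
}
core_initcall(example_register_nvs);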
/**
- * hibernate_nvs_free - free data pages allocated for saving NVS regions
+ * suspend_nvs_free - free data pages allocated for saving NVS regions
*/
-void hibernate_nvs_free(void)
+void suspend_nvs_free(void)
{
struct nvs_page *entry;
}
/**
- * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
+ * suspend_nvs_alloc - allocate memory necessary for saving NVS regions
*/
-int hibernate_nvs_alloc(void)
+int suspend_nvs_alloc(void)
{
struct nvs_page *entry;
list_for_each_entry(entry, &nvs_list, node) {
entry->data = (void *)__get_free_page(GFP_KERNEL);
if (!entry->data) {
- hibernate_nvs_free();
+ suspend_nvs_free();
return -ENOMEM;
}
}
}
/**
- * hibernate_nvs_save - save NVS memory regions
+ * suspend_nvs_save - save NVS memory regions
*/
-void hibernate_nvs_save(void)
+void suspend_nvs_save(void)
{
struct nvs_page *entry;
}
/**
- * hibernate_nvs_restore - restore NVS memory regions
+ * suspend_nvs_restore - restore NVS memory regions
*
* This function is going to be called with interrupts disabled, so it
* cannot iounmap the virtual addresses used to access the NVS region.
*/
-void hibernate_nvs_restore(void)
+void suspend_nvs_restore(void)
{
struct nvs_page *entry;
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
#include "power.h"
int section = sechdr->sh_info;
return (void *)elf->hdr + sechdrs[section].sh_offset +
- (r->r_offset - sechdrs[section].sh_addr);
+ r->r_offset - sechdrs[section].sh_addr;
}
static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
ret = strncpy_from_user(type, _type, len);
if (ret < 0)
- return -EFAULT;
+ return ret;
if (ret == 0 || ret >= len)
return -EINVAL;
return old_setting;
error:
abort_creds(new);
- return -EINVAL;
+ return ret;
} /* end keyctl_set_reqkey_keyring() */
}
-static long outstream_get_space_available(struct hpi_hostbuffer_status
+static u32 outstream_get_space_available(struct hpi_hostbuffer_status
*status)
{
- return status->size_in_bytes - ((long)(status->host_index) -
- (long)(status->dSP_index));
+ return status->size_in_bytes - (status->host_index -
+ status->dSP_index);
}
static void outstream_write(struct hpi_adapter_obj *pao,
struct hpi_hw_obj *phw = pao->priv;
struct bus_master_interface *interface = phw->p_interface_buffer;
struct hpi_hostbuffer_status *status;
- long space_available;
+ u32 space_available;
if (!phw->outstream_host_buffer_size[phm->obj_index]) {
/* there is no BBM buffer, write via message */
}
space_available = outstream_get_space_available(status);
- if (space_available < (long)phm->u.d.u.data.data_size) {
+ if (space_available < phm->u.d.u.data.data_size) {
phr->error = HPI_ERROR_INVALID_DATASIZE;
return;
}
&& hpios_locked_mem_valid(&phw->outstream_host_buffers[phm->
obj_index])) {
u8 *p_bbm_data;
- long l_first_write;
+ u32 l_first_write;
u8 *p_app_data = (u8 *)phm->u.d.u.data.pb_data;
if (hpios_locked_mem_get_virt_addr(&phw->
hw_message(pao, phm, phr);
}
-static long instream_get_bytes_available(struct hpi_hostbuffer_status *status)
+static u32 instream_get_bytes_available(struct hpi_hostbuffer_status *status)
{
- return (long)(status->dSP_index) - (long)(status->host_index);
+ return status->dSP_index - status->host_index;
}
static void instream_read(struct hpi_adapter_obj *pao,
struct hpi_hw_obj *phw = pao->priv;
struct bus_master_interface *interface = phw->p_interface_buffer;
struct hpi_hostbuffer_status *status;
- long data_available;
+ u32 data_available;
u8 *p_bbm_data;
- long l_first_read;
+ u32 l_first_read;
u8 *p_app_data = (u8 *)phm->u.d.u.data.pb_data;
if (!phw->instream_host_buffer_size[phm->obj_index]) {
status = &interface->instream_host_buffer_status[phm->obj_index];
data_available = instream_get_bytes_available(status);
- if (data_available < (long)phm->u.d.u.data.data_size) {
+ if (data_available < phm->u.d.u.data.data_size) {
phr->error = HPI_ERROR_INVALID_DATASIZE;
return;
}
}
/* assign Capture Source enums to NID */
- kctl = snd_hda_find_mixer_ctl(codec, "Capture Source");
- if (!kctl)
- kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
- for (i = 0; kctl && i < kctl->count; i++) {
- hda_nid_t *nids = spec->capsrc_nids;
- if (!nids)
- nids = spec->adc_nids;
- err = snd_hda_add_nid(codec, kctl, i, nids[i]);
- if (err < 0)
- return err;
+ if (spec->capsrc_nids || spec->adc_nids) {
+ kctl = snd_hda_find_mixer_ctl(codec, "Capture Source");
+ if (!kctl)
+ kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
+ for (i = 0; kctl && i < kctl->count; i++) {
+ hda_nid_t *nids = spec->capsrc_nids;
+ if (!nids)
+ nids = spec->adc_nids;
+ err = snd_hda_add_nid(codec, kctl, i, nids[i]);
+ if (err < 0)
+ return err;
+ }
}
if (spec->cap_mixer) {
const char *kname = kctl ? kctl->id.name : NULL;
.num_items = 3,
.items = {
{ "Mic", 0x1 },
- { "Line", 0x2 },
+ { "Line", 0x7 },
{ "CD", 0x4 },
},
};
HDA_BIND_MUTE ("LFE Playback Switch", 0x0e, 0x02, HDA_INPUT),
HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0f, 0x00, HDA_OUTPUT),
HDA_BIND_MUTE ("Headphone Playback Switch", 0x0f, 0x02, HDA_INPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
- HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
+ HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x07, HDA_INPUT),
+ HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x07, HDA_INPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
HDA_CODEC_MUTE ("Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
HDA_CODEC_VOLUME("Line Boost", 0x15, 0x00, HDA_INPUT),
{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0x1)},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0x7)},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0x4)},
{ }
};
NUMDMA_MASK);
mcasp_mod_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
((dev->txnumevt * tx_ser) << 8), NUMEVT_MASK);
- mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, FIFO_ENABLE);
}
if (dev->rxnumevt && stream == SNDRV_PCM_STREAM_CAPTURE) {
NUMDMA_MASK);
mcasp_mod_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
((dev->rxnumevt * rx_ser) << 8), NUMEVT_MASK);
- mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, FIFO_ENABLE);
}
}
};
/* Utility for retrieving psc_dma_stream structure from a substream */
-inline struct psc_dma_stream *
+static inline struct psc_dma_stream *
to_psc_dma_stream(struct snd_pcm_substream *substream, struct psc_dma *psc_dma)
{
if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0),
UAC2_CS_CUR,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
- UAC2_CX_CLOCK_SELECTOR << 8, selector_id << 8,
+ UAC2_CX_CLOCK_SELECTOR << 8,
+ snd_usb_ctrl_intf(chip) | (selector_id << 8),
&buf, sizeof(buf), 1000);
if (ret < 0)
err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- UAC2_CS_CONTROL_CLOCK_VALID << 8, source_id << 8,
+ UAC2_CS_CONTROL_CLOCK_VALID << 8,
+ snd_usb_ctrl_intf(chip) | (source_id << 8),
&data, sizeof(data), 1000);
if (err < 0) {
data[3] = rate >> 24;
if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC2_CS_CUR,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- UAC2_CS_CONTROL_SAM_FREQ << 8, clock << 8,
+ UAC2_CS_CONTROL_SAM_FREQ << 8,
+ snd_usb_ctrl_intf(chip) | (clock << 8),
data, sizeof(data), 1000)) < 0) {
snd_printk(KERN_ERR "%d:%d:%d: cannot set freq %d (v2)\n",
dev->devnum, iface, fmt->altsetting, rate);
if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- UAC2_CS_CONTROL_SAM_FREQ << 8, clock << 8,
+ UAC2_CS_CONTROL_SAM_FREQ << 8,
+ snd_usb_ctrl_intf(chip) | (clock << 8),
data, sizeof(data), 1000)) < 0) {
snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n",
dev->devnum, iface, fmt->altsetting);
if (snd_usb_parse_audio_format(chip, fp, format, fmt, stream, alts) < 0) {
kfree(fp->rate_table);
kfree(fp);
+ fp = NULL;
continue;
}
return 0;
}
+/*
+ * Helper function to walk the array of sample rate triplets reported by
+ * the device. The problem is that we need to parse the whole array first
+ * to know how many sample rates to expect.
+ * Then fp->rate_table can be allocated and filled.
+ */
+static int parse_uac2_sample_rate_range(struct audioformat *fp, int nr_triplets,
+ const unsigned char *data)
+{
+ int i, nr_rates = 0;
+
+ fp->rates = fp->rate_min = fp->rate_max = 0;
+
+ for (i = 0; i < nr_triplets; i++) {
+ int min = combine_quad(&data[2 + 12 * i]);
+ int max = combine_quad(&data[6 + 12 * i]);
+ int res = combine_quad(&data[10 + 12 * i]);
+ int rate;
+
+ if ((max < 0) || (min < 0) || (res < 0) || (max < min))
+ continue;
+
+ /*
+ * for ranges with res == 1, we announce a continuous sample
+ * rate range, and this function should return 0 for no further
+ * parsing.
+ */
+ if (res == 1) {
+ fp->rate_min = min;
+ fp->rate_max = max;
+ fp->rates = SNDRV_PCM_RATE_CONTINUOUS;
+ return 0;
+ }
+
+ for (rate = min; rate <= max; rate += res) {
+ if (fp->rate_table)
+ fp->rate_table[nr_rates] = rate;
+ if (!fp->rate_min || rate < fp->rate_min)
+ fp->rate_min = rate;
+ if (!fp->rate_max || rate > fp->rate_max)
+ fp->rate_max = rate;
+ fp->rates |= snd_pcm_rate_to_rate_bit(rate);
+
+ nr_rates++;
+
+ /* avoid endless loop */
+ if (res == 0)
+ break;
+ }
+ }
+
+ return nr_rates;
+}
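A worked illustration of the triplet expansion performed by the parser above; the min/max/res values are invented and not from any real device descriptor:

#include <stdio.h>

int main(void)
{
	/* one sample-rate triplet: min = 44100, max = 48000, res = 3900 */
	int min = 44100, max = 48000, res = 3900;
	int rate, nr_rates = 0;

	for (rate = min; rate <= max; rate += res) {
		printf("rate[%d] = %d\n", nr_rates++, rate);
		if (res == 0)	/* same endless-loop guard as above */
			break;
	}
	printf("%d discrete rates\n", nr_rates);	/* 2: 44100 and 48000 */
	return 0;
}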
+
/*
 * parse the format descriptor and store the possible sample rates
* on the audioformat table (audio class v2).
{
struct usb_device *dev = chip->dev;
unsigned char tmp[2], *data;
- int i, nr_rates, data_size, ret = 0;
+ int nr_triplets, data_size, ret = 0;
int clock = snd_usb_clock_find_source(chip, chip->ctrl_intf, fp->clock);
+ if (clock < 0) {
+ snd_printk(KERN_ERR "%s(): unable to find clock source (clock %d)\n",
+ __func__, clock);
+ goto err;
+ }
+
/* get the number of sample rates first by only fetching 2 bytes */
ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_RANGE,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- UAC2_CS_CONTROL_SAM_FREQ << 8, clock << 8,
+ UAC2_CS_CONTROL_SAM_FREQ << 8,
+ snd_usb_ctrl_intf(chip) | (clock << 8),
tmp, sizeof(tmp), 1000);
if (ret < 0) {
goto err;
}
- nr_rates = (tmp[1] << 8) | tmp[0];
- data_size = 2 + 12 * nr_rates;
+ nr_triplets = (tmp[1] << 8) | tmp[0];
+ data_size = 2 + 12 * nr_triplets;
data = kzalloc(data_size, GFP_KERNEL);
if (!data) {
ret = -ENOMEM;
/* now get the full information */
ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_RANGE,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- UAC2_CS_CONTROL_SAM_FREQ << 8, clock << 8,
+ UAC2_CS_CONTROL_SAM_FREQ << 8,
+ snd_usb_ctrl_intf(chip) | (clock << 8),
data, data_size, 1000);
if (ret < 0) {
goto err_free;
}
- fp->rate_table = kmalloc(sizeof(int) * nr_rates, GFP_KERNEL);
+ /* Call the triplet parser, and make sure fp->rate_table is NULL.
+ * We just use the return value to know how many sample rates we
+ * will have to deal with. */
+ kfree(fp->rate_table);
+ fp->rate_table = NULL;
+ fp->nr_rates = parse_uac2_sample_rate_range(fp, nr_triplets, data);
+
+ if (fp->nr_rates == 0) {
+ /* SNDRV_PCM_RATE_CONTINUOUS */
+ ret = 0;
+ goto err_free;
+ }
+
+ fp->rate_table = kmalloc(sizeof(int) * fp->nr_rates, GFP_KERNEL);
if (!fp->rate_table) {
ret = -ENOMEM;
goto err_free;
}
- fp->nr_rates = 0;
- fp->rate_min = fp->rate_max = 0;
-
- for (i = 0; i < nr_rates; i++) {
- int rate = combine_quad(&data[2 + 12 * i]);
-
- fp->rate_table[fp->nr_rates] = rate;
- if (!fp->rate_min || rate < fp->rate_min)
- fp->rate_min = rate;
- if (!fp->rate_max || rate > fp->rate_max)
- fp->rate_max = rate;
- fp->rates |= snd_pcm_rate_to_rate_bit(rate);
- fp->nr_rates++;
- }
+ /* Call the triplet parser again, but this time, fp->rate_table is
+ * allocated, so the rates will be stored */
+ parse_uac2_sample_rate_range(fp, nr_triplets, data);
err_free:
kfree(data);
#define snd_usb_get_speed(dev) ((dev)->speed)
#endif
+static inline int snd_usb_ctrl_intf(struct snd_usb_audio *chip)
+{
+ return get_iface_desc(chip->ctrl_intf)->bInterfaceNumber;
+}
#endif /* __USBAUDIO_HELPER_H */
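A minimal standalone sketch of how the wIndex values in the requests above are composed: the control interface number goes in the low byte and the entity (clock or selector) ID in the high byte. The helper name and example numbers are invented.

#include <stdint.h>
#include <stdio.h>

static uint16_t uac2_windex(uint8_t ctrl_intf, uint8_t entity_id)
{
	/* same construction as snd_usb_ctrl_intf(chip) | (entity_id << 8) */
	return (uint16_t)(ctrl_intf | ((uint16_t)entity_id << 8));
}

int main(void)
{
	/* e.g. control interface 2, clock entity 0x29 -> wIndex 0x2902 */
	printf("0x%04x\n", uac2_windex(2, 0x29));
	return 0;
}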
static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret)
{
- unsigned char buf[14]; /* enough space for one range of 4 bytes */
+ unsigned char buf[2 + 3*sizeof(__u16)]; /* enough space for one range */
unsigned char *val;
- int ret;
+ int ret, size;
__u8 bRequest;
- bRequest = (request == UAC_GET_CUR) ?
- UAC2_CS_CUR : UAC2_CS_RANGE;
+ if (request == UAC_GET_CUR) {
+ bRequest = UAC2_CS_CUR;
+ size = sizeof(__u16);
+ } else {
+ bRequest = UAC2_CS_RANGE;
+ size = sizeof(buf);
+ }
+
+ memset(buf, 0, sizeof(buf));
ret = snd_usb_ctl_msg(cval->mixer->chip->dev,
usb_rcvctrlpipe(cval->mixer->chip->dev, 0),
bRequest,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
validx, cval->mixer->ctrlif | (cval->id << 8),
- buf, sizeof(buf), 1000);
+ buf, size, 1000);
if (ret < 0) {
snd_printk(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
return ret;
}
+ /* FIXME: how should we handle multiple triplets here? */
+
switch (request) {
case UAC_GET_CUR:
val = buf;